<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<style>
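/* Pure-CSS dropdown: .dropdown-content is hidden by default and revealed by the
   .dropdown:hover rule below, so the version and Apache menus need no JavaScript. */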
.dropdown {
position: relative;
display: inline-block;
}
.dropdown-content {
display: none;
position: absolute;
background-color: #f9f9f9;
min-width: 160px;
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
padding: 12px 16px;
z-index: 1;
text-align: left;
}
.dropdown:hover .dropdown-content {
display: block;
}
.dropdown-option:hover {
color: #FF4500;
}
.dropdown-option-active {
color: #FF4500;
font-weight: lighter;
}
.dropdown-option {
color: #000000;
font-weight: lighter;
}
.dropdown-header {
color: #FFFFFF;
display: inline-flex;
}
.dropdown-caret {
width: 18px;
}
.dropdown-caret-path {
fill: #FFFFFF;
}
</style>
<title>mxnet.optimizer.optimizer &#8212; Apache MXNet documentation</title>
<link rel="stylesheet" href="../../../_static/basic.css" type="text/css" />
<link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" />
<link rel="stylesheet" type="text/css" href="../../../_static/mxnet.css" />
<link rel="stylesheet" href="../../../_static/material-design-lite-1.3.0/material.blue-deep_orange.min.css" type="text/css" />
<link rel="stylesheet" href="../../../_static/sphinx_materialdesign_theme.css" type="text/css" />
<link rel="stylesheet" href="../../../_static/fontawesome/all.css" type="text/css" />
<link rel="stylesheet" href="../../../_static/fonts.css" type="text/css" />
<link rel="stylesheet" href="../../../_static/feedback.css" type="text/css" />
<script id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script>
<script src="../../../_static/jquery.js"></script>
<script src="../../../_static/underscore.js"></script>
<script src="../../../_static/doctools.js"></script>
<script src="../../../_static/language_data.js"></script>
<script src="../../../_static/matomo_analytics.js"></script>
<script src="../../../_static/autodoc.js"></script>
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script type="text/x-mathjax-config">MathJax.Hub.Config({"tex2jax": {"inlineMath": [["$", "$"], ["\\(", "\\)"]], "processEscapes": true, "ignoreClass": "document", "processClass": "math|output_area"}})</script>
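<!-- The MathJax config above enables $...$ and \(...\) inline math with escaped-dollar
     support, skips elements with class "document", and re-enables typesetting inside
     elements with class "math" or "output_area". -->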
<link rel="shortcut icon" href="../../../_static/mxnet-icon.png"/>
<link rel="index" title="Index" href="../../../genindex.html" />
<link rel="search" title="Search" href="../../../search.html" />
</head>
<body><header class="site-header" role="banner">
<div class="wrapper">
<a class="site-title" rel="author" href="/versions/1.9.1/"><img
src="../../../_static/mxnet_logo.png" class="site-header-logo" alt="Apache MXNet"></a>
<nav class="site-nav">
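<!-- CSS-only nav toggle: the label below flips this hidden checkbox; its :checked
     state (handled in the site's external stylesheets, not shown here) is assumed
     to reveal the .trigger menu on small screens. -->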
<input type="checkbox" id="nav-trigger" class="nav-trigger"/>
<label for="nav-trigger">
<span class="menu-icon">
<svg viewBox="0 0 18 15" width="18px" height="15px">
<path d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.032C17.335,0,18,0.665,18,1.484L18,1.484z M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.032C17.335,6.031,18,6.696,18,7.516L18,7.516z M18,13.516C18,14.335,17.335,15,16.516,15H1.484 C0.665,15,0,14.335,0,13.516l0,0c0-0.82,0.665-1.483,1.484-1.483h15.032C17.335,12.031,18,12.695,18,13.516L18,13.516z"/>
</svg>
</span>
</label>
<div class="trigger">
<a class="page-link" href="/versions/1.9.1/get_started">Get Started</a>
<a class="page-link" href="/versions/1.9.1/features">Features</a>
<a class="page-link" href="/versions/1.9.1/ecosystem">Ecosystem</a>
<a class="page-link page-current" href="/versions/1.9.1/api">Docs &amp; Tutorials</a>
<a class="page-link" href="/versions/1.9.1/trusted_by">Trusted By</a>
<a class="page-link" href="https://github.com/apache/incubator-mxnet">GitHub</a>
<div class="dropdown" style="min-width:100px">
<span class="dropdown-header">Apache
<svg class="dropdown-caret icon icon-caret-bottom" viewBox="0 0 32 32" aria-hidden="true"><path class="dropdown-caret-path" d="M24 11.305l-7.997 11.39L8 11.305z"></path></svg>
</span>
<div class="dropdown-content" style="min-width:250px">
<a href="https://www.apache.org/foundation/">Apache Software Foundation</a>
<a href="https://incubator.apache.org/">Apache Incubator</a>
<a href="https://www.apache.org/licenses/">License</a>
<a href="/versions/1.9.1/api/faq/security.html">Security</a>
<a href="https://privacy.apache.org/policies/privacy-policy-public.html">Privacy</a>
<a href="https://www.apache.org/events/current-event">Events</a>
<a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
<a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
</div>
</div>
<div class="dropdown">
<span class="dropdown-header">1.9.1
<svg class="dropdown-caret icon icon-caret-bottom" viewBox="0 0 32 32" aria-hidden="true"><path class="dropdown-caret-path" d="M24 11.305l-7.997 11.39L8 11.305z"></path></svg>
</span>
<div class="dropdown-content">
<a class="dropdown-option" href="/">master</a><br>
<a class="dropdown-option-active" href="/versions/1.9.1/">1.9.1</a><br>
<a class="dropdown-option" href="/versions/1.8.0/">1.8.0</a><br>
<a class="dropdown-option" href="/versions/1.7.0/">1.7.0</a><br>
<a class="dropdown-option" href="/versions/1.6.0/">1.6.0</a><br>
<a class="dropdown-option" href="/versions/1.5.0/">1.5.0</a><br>
<a class="dropdown-option" href="/versions/1.4.1/">1.4.1</a><br>
<a class="dropdown-option" href="/versions/1.3.1/">1.3.1</a><br>
<a class="dropdown-option" href="/versions/1.2.1/">1.2.1</a><br>
<a class="dropdown-option" href="/versions/1.1.0/">1.1.0</a><br>
<a class="dropdown-option" href="/versions/1.0.0/">1.0.0</a><br>
<a class="dropdown-option" href="/versions/0.12.1/">0.12.1</a><br>
<a class="dropdown-option" href="/versions/0.11.0/">0.11.0</a>
</div>
</div>
</div>
</nav>
</div>
</header>
<div class="mdl-layout mdl-js-layout mdl-layout--fixed-header mdl-layout--fixed-drawer"><header class="mdl-layout__header mdl-layout__header--waterfall">
<div class="mdl-layout__header-row">
<nav class="mdl-navigation breadcrumb">
<a class="mdl-navigation__link" href="../../index.html">Module code</a><i class="material-icons">navigate_next</i>
<a class="mdl-navigation__link is-active">mxnet.optimizer.optimizer</a>
</nav>
<div class="mdl-layout-spacer"></div>
<nav class="mdl-navigation">
<form class="form-inline pull-sm-right" action="../../../search.html" method="get">
<div class="mdl-textfield mdl-js-textfield mdl-textfield--expandable mdl-textfield--floating-label mdl-textfield--align-right">
<label id="quick-search-icon" class="mdl-button mdl-js-button mdl-button--icon" for="waterfall-exp">
<i class="material-icons">search</i>
</label>
<div class="mdl-textfield__expandable-holder">
<input class="mdl-textfield__input" type="text" name="q" id="waterfall-exp" placeholder="Search" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</div>
</div>
<div class="mdl-tooltip" data-mdl-for="quick-search-icon">
Quick search
</div>
</form>
</nav>
</div>
<div class="mdl-layout__header-row header-links">
<div class="mdl-layout-spacer"></div>
<nav class="mdl-navigation">
</nav>
</div>
</header><header class="mdl-layout__drawer">
<div class="globaltoc">
<span class="mdl-layout-title toc">Table Of Contents</span>
<nav class="mdl-navigation">
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../tutorials/index.html">Python Tutorials</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../../tutorials/getting-started/index.html">Getting Started</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/index.html">Crash Course</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/1-ndarray.html">Manipulate data with <code class="docutils literal notranslate"><span class="pre">ndarray</span></code></a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/2-nn.html">Create a neural network</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/3-autograd.html">Automatic differentiation with <code class="docutils literal notranslate"><span class="pre">autograd</span></code></a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/4-train.html">Train the neural network</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/5-predict.html">Predict with a pre-trained model</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/crash-course/6-use_gpus.html">Use GPUs</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/getting-started/to-mxnet/index.html">Moving to MXNet from Other Frameworks</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/getting-started/to-mxnet/pytorch.html">PyTorch vs Apache MXNet</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/getting-started/gluon_from_experiment_to_deployment.html">Gluon: from experiment to deployment</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/getting-started/logistic_regression_explained.html">Logistic regression explained</a></li>
<li class="toctree-l3"><a class="reference external" href="https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html">MNIST</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../tutorials/packages/index.html">Packages</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/autograd/index.html">Automatic Differentiation</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/gluon/index.html">Gluon</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/index.html">Blocks</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/custom-layer.html">Custom Layers</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/custom_layer_beginners.html">Custom Layers (Beginners)</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/hybridize.html">Hybridize</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/init.html">Initialization</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/naming.html">Parameter and Block Naming</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/nn.html">Layers and Blocks</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/parameters.html">Parameter Management</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/save_load_params.html">Saving and Loading Gluon Models</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/blocks/activations/activations.html">Activation Blocks</a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/gluon/data/index.html">Data Tutorials</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/data_augmentation.html">Image Augmentation</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/data_augmentation.html#Spatial-Augmentation">Spatial Augmentation</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/data_augmentation.html#Color-Augmentation">Color Augmentation</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/data_augmentation.html#Composed-Augmentations">Composed Augmentations</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/datasets.html">Gluon <code class="docutils literal notranslate"><span class="pre">Dataset</span></code>s and <code class="docutils literal notranslate"><span class="pre">DataLoader</span></code></a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/datasets.html#Using-own-data-with-included-Datasets">Using own data with included <code class="docutils literal notranslate"><span class="pre">Dataset</span></code>s</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/datasets.html#Using-own-data-with-custom-Datasets">Using own data with custom <code class="docutils literal notranslate"><span class="pre">Dataset</span></code>s</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/data/datasets.html#Appendix:-Upgrading-from-Module-DataIter-to-Gluon-DataLoader">Appendix: Upgrading from Module <code class="docutils literal notranslate"><span class="pre">DataIter</span></code> to Gluon <code class="docutils literal notranslate"><span class="pre">DataLoader</span></code></a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/gluon/image/index.html">Image Tutorials</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/image/image-augmentation.html">Image Augmentation</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/image/info_gan.html">Image similarity search with InfoGAN</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/image/mnist.html">Handwritten Digit Recognition</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/image/pretrained_models.html">Using pre-trained models in MXNet</a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/gluon/loss/index.html">Losses</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/loss/custom-loss.html">Custom Loss Blocks</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/loss/kl_divergence.html">Kullback-Leibler (KL) Divergence</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/loss/loss.html">Loss functions</a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/gluon/text/index.html">Text Tutorials</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/text/gnmt.html">Google Neural Machine Translation</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/text/transformer.html">Machine Translation with Transformer</a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/gluon/training/index.html">Training</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/training/fit_api_tutorial.html">MXNet Gluon Fit API</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/training/trainer.html">Trainer</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/training/learning_rates/index.html">Learning Rates</a><ul>
<li class="toctree-l6"><a class="reference internal" href="../../../tutorials/packages/gluon/training/learning_rates/learning_rate_finder.html">Learning Rate Finder</a></li>
<li class="toctree-l6"><a class="reference internal" href="../../../tutorials/packages/gluon/training/learning_rates/learning_rate_schedules.html">Learning Rate Schedules</a></li>
<li class="toctree-l6"><a class="reference internal" href="../../../tutorials/packages/gluon/training/learning_rates/learning_rate_schedules_advanced.html">Advanced Learning Rate Schedules</a></li>
</ul>
</li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/gluon/training/normalization/index.html">Normalization Blocks</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/kvstore/index.html">KVStore</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/kvstore/kvstore.html">Distributed Key-Value Store</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/ndarray/index.html">NDArray</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/ndarray/01-ndarray-intro.html">An Intro: Manipulate Data the MXNet Way with NDArray</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/ndarray/02-ndarray-operations.html">NDArray Operations</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/ndarray/03-ndarray-contexts.html">NDArray Contexts</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/ndarray/gotchas_numpy_in_mxnet.html">Gotchas using NumPy in Apache MXNet</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/ndarray/sparse/index.html">Tutorials</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/ndarray/sparse/csr.html">CSRNDArray - NDArray in Compressed Sparse Row Storage Format</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/ndarray/sparse/row_sparse.html">RowSparseNDArray - NDArray for Sparse Gradient Updates</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/ndarray/sparse/train.html">Train a Linear Regression Model with Sparse Symbols</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/packages/ndarray/sparse/train_gluon.html">Sparse NDArrays with Gluon</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/onnx/index.html">ONNX</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/onnx/fine_tuning_gluon.html">Fine-tuning an ONNX model</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/onnx/inference_on_onnx_model.html">Running inference on MXNet/Gluon from an ONNX model</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/packages/onnx/super_resolution.html">Importing an ONNX model into MXNet</a></li>
<li class="toctree-l4"><a class="reference external" href="https://mxnet.apache.org/api/python/docs/tutorials/deploy/export/onnx.html">Export ONNX Models</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/optimizer/index.html">Optimizers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/packages/viz/index.html">Visualization</a><ul>
<li class="toctree-l4"><a class="reference external" href="https://mxnet.apache.org/api/faq/visualize_graph">Visualize networks</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../tutorials/performance/index.html">Performance</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/performance/compression/index.html">Compression</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/performance/compression/int8.html">Deploy with int-8</a></li>
<li class="toctree-l4"><a class="reference external" href="https://mxnet.apache.org/api/faq/float16">Float16</a></li>
<li class="toctree-l4"><a class="reference external" href="https://mxnet.apache.org/api/faq/gradient_compression">Gradient Compression</a></li>
<li class="toctree-l4"><a class="reference external" href="https://gluon-cv.mxnet.io/build/examples_deployment/int8_inference.html">GluonCV with Quantized Models</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/performance/backend/index.html">Accelerated Backend Tools</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/performance/backend/mkldnn/index.html">Intel MKL-DNN</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/performance/backend/mkldnn/mkldnn_quantization.html">Quantize with MKL-DNN backend</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/performance/backend/mkldnn/mkldnn_quantization.html#Improving-accuracy-with-Intel®-Neural-Compressor">Improving accuracy with Intel® Neural Compressor</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/performance/backend/mkldnn/mkldnn_readme.html">Install MXNet with MKL-DNN</a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/performance/backend/tensorrt/index.html">TensorRT</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../tutorials/performance/backend/tensorrt/tensorrt.html">Optimizing Deep Learning Computation Graphs with TensorRT</a></li>
</ul>
</li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/performance/backend/tvm.html">Use TVM</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/performance/backend/profiler.html">Profiling MXNet Models</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/performance/backend/amp.html">Using AMP: Automatic Mixed Precision</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../tutorials/deploy/index.html">Deployment</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/deploy/export/index.html">Export</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/export/onnx.html">Exporting to ONNX format</a></li>
<li class="toctree-l4"><a class="reference external" href="https://gluon-cv.mxnet.io/build/examples_deployment/export_network.html">Export Gluon CV Models</a></li>
<li class="toctree-l4"><a class="reference external" href="https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/blocks/save_load_params.html">Save / Load Parameters</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/deploy/inference/index.html">Inference</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/inference/cpp.html">Deploy into C++</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/inference/image_classification_jetson.html">Image Classification using a pretrained ResNet-50 model on a Jetson module</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/inference/scala.html">Deploy into a Java or Scala Environment</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/inference/wine_detector.html">Real-time Object Detection with MXNet On The Raspberry Pi</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/deploy/run-on-aws/index.html">Run on AWS</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/run-on-aws/use_ec2.html">Run on an EC2 Instance</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/run-on-aws/use_sagemaker.html">Run on Amazon SageMaker</a></li>
<li class="toctree-l4"><a class="reference internal" href="../../../tutorials/deploy/run-on-aws/cloud.html">MXNet on the Cloud</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../tutorials/extend/index.html">Extend</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/extend/custom_layer.html">Custom Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../tutorials/extend/customop.html">Custom Numpy Operators</a></li>
<li class="toctree-l3"><a class="reference external" href="https://mxnet.apache.org/api/faq/new_op">New Operator Creation</a></li>
<li class="toctree-l3"><a class="reference external" href="https://mxnet.apache.org/api/faq/add_op_in_backend">New Operator in MXNet Backend</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../../api/index.html">Python API</a><ul>
<li class="toctree-l2"><a class="reference internal" href="../../../api/ndarray/index.html">mxnet.ndarray</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/ndarray.html">ndarray</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/contrib/index.html">ndarray.contrib</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/image/index.html">ndarray.image</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/linalg/index.html">ndarray.linalg</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/op/index.html">ndarray.op</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/random/index.html">ndarray.random</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/register/index.html">ndarray.register</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/sparse/index.html">ndarray.sparse</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/ndarray/utils/index.html">ndarray.utils</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/gluon/index.html">mxnet.gluon</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/block.html">gluon.Block</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/hybrid_block.html">gluon.HybridBlock</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/symbol_block.html">gluon.SymbolBlock</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/constant.html">gluon.Constant</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/parameter.html">gluon.Parameter</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/parameter_dict.html">gluon.ParameterDict</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/trainer.html">gluon.Trainer</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/contrib/index.html">gluon.contrib</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/data/index.html">gluon.data</a><ul>
<li class="toctree-l4"><a class="reference internal" href="../../../api/gluon/data/vision/index.html">data.vision</a><ul>
<li class="toctree-l5"><a class="reference internal" href="../../../api/gluon/data/vision/datasets/index.html">vision.datasets</a></li>
<li class="toctree-l5"><a class="reference internal" href="../../../api/gluon/data/vision/transforms/index.html">vision.transforms</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/loss/index.html">gluon.loss</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/model_zoo/index.html">gluon.model_zoo.vision</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/nn/index.html">gluon.nn</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/rnn/index.html">gluon.rnn</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/gluon/utils/index.html">gluon.utils</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/autograd/index.html">mxnet.autograd</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/initializer/index.html">mxnet.initializer</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/optimizer/index.html">mxnet.optimizer</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/lr_scheduler/index.html">mxnet.lr_scheduler</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/metric/index.html">mxnet.metric</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/kvstore/index.html">mxnet.kvstore</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/symbol/index.html">mxnet.symbol</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/symbol.html">symbol</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/contrib/index.html">symbol.contrib</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/image/index.html">symbol.image</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/linalg/index.html">symbol.linalg</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/op/index.html">symbol.op</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/random/index.html">symbol.random</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/register/index.html">symbol.register</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/symbol/sparse/index.html">symbol.sparse</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/module/index.html">mxnet.module</a></li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/contrib/index.html">mxnet.contrib</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/autograd/index.html">contrib.autograd</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/io/index.html">contrib.io</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/ndarray/index.html">contrib.ndarray</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/onnx/index.html">contrib.onnx</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/quantization/index.html">contrib.quantization</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/symbol/index.html">contrib.symbol</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/tensorboard/index.html">contrib.tensorboard</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/tensorrt/index.html">contrib.tensorrt</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/contrib/text/index.html">contrib.text</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../../api/mxnet/index.html">mxnet</a><ul>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/attribute/index.html">mxnet.attribute</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/base/index.html">mxnet.base</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/callback/index.html">mxnet.callback</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/context/index.html">mxnet.context</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/engine/index.html">mxnet.engine</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/executor/index.html">mxnet.executor</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/executor_manager/index.html">mxnet.executor_manager</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/image/index.html">mxnet.image</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/io/index.html">mxnet.io</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/kvstore_server/index.html">mxnet.kvstore_server</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/libinfo/index.html">mxnet.libinfo</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/log/index.html">mxnet.log</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/model/index.html">mxnet.model</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/monitor/index.html">mxnet.monitor</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/name/index.html">mxnet.name</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/notebook/index.html">mxnet.notebook</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/operator/index.html">mxnet.operator</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/profiler/index.html">mxnet.profiler</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/random/index.html">mxnet.random</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/recordio/index.html">mxnet.recordio</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/registry/index.html">mxnet.registry</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/rtc/index.html">mxnet.rtc</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/runtime/index.html">mxnet.runtime</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/test_utils/index.html">mxnet.test_utils</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/torch/index.html">mxnet.torch</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/util/index.html">mxnet.util</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/visualization/index.html">mxnet.visualization</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</nav>
</div>
</header>
<main class="mdl-layout__content" tabindex="0">
<script type="text/javascript" src="../../../_static/sphinx_materialdesign_theme.js"></script>
<script type="text/javascript" src="../../../_static/feedback.js"></script>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/attribute/index.html">mxnet.attribute</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/base/index.html">mxnet.base</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/callback/index.html">mxnet.callback</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/context/index.html">mxnet.context</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/engine/index.html">mxnet.engine</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/executor/index.html">mxnet.executor</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/executor_manager/index.html">mxnet.executor_manager</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/image/index.html">mxnet.image</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/io/index.html">mxnet.io</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/kvstore_server/index.html">mxnet.kvstore_server</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/libinfo/index.html">mxnet.libinfo</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/log/index.html">mxnet.log</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/model/index.html">mxnet.model</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/monitor/index.html">mxnet.monitor</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/name/index.html">mxnet.name</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/notebook/index.html">mxnet.notebook</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/operator/index.html">mxnet.operator</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/profiler/index.html">mxnet.profiler</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/random/index.html">mxnet.random</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/recordio/index.html">mxnet.recordio</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/registry/index.html">mxnet.registry</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/rtc/index.html">mxnet.rtc</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/runtime/index.html">mxnet.runtime</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/test_utils/index.html">mxnet.test_utils</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/torch/index.html">mxnet.torch</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/util/index.html">mxnet.util</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../../api/mxnet/visualization/index.html">mxnet.visualization</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</nav>
</div>
</header>
<div class="document">
<div class="page-content" role="main">
Source code for mxnet.optimizer.optimizer

# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""Weight updating functions."""
<span class="kn">import</span> <span class="nn">logging</span>
<span class="kn">import</span> <span class="nn">math</span>
<span class="kn">import</span> <span class="nn">pickle</span>
<span class="kn">import</span> <span class="nn">warnings</span>
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">numpy</span>
<span class="kn">from</span> <span class="nn">..base</span> <span class="kn">import</span> <span class="n">py_str</span>
<span class="kn">from</span> <span class="nn">..ndarray</span> <span class="kn">import</span> <span class="p">(</span><span class="n">NDArray</span><span class="p">,</span> <span class="n">zeros</span><span class="p">,</span> <span class="n">clip</span><span class="p">,</span> <span class="n">sqrt</span><span class="p">,</span> <span class="n">cast</span><span class="p">,</span> <span class="n">maximum</span><span class="p">,</span> <span class="nb">abs</span> <span class="k">as</span> <span class="n">NDabs</span><span class="p">,</span> <span class="n">array</span><span class="p">,</span> <span class="n">multiply</span><span class="p">,</span>
<span class="n">multi_sum_sq</span><span class="p">,</span> <span class="n">multi_lars</span><span class="p">,</span> <span class="n">norm</span> <span class="k">as</span> <span class="n">NDnorm</span><span class="p">)</span>
<span class="kn">from</span> <span class="nn">..ndarray</span> <span class="kn">import</span> <span class="p">(</span><span class="n">sgd_update</span><span class="p">,</span> <span class="n">sgd_mom_update</span><span class="p">,</span> <span class="n">adam_update</span><span class="p">,</span> <span class="n">rmsprop_update</span><span class="p">,</span> <span class="n">rmspropalex_update</span><span class="p">,</span>
<span class="n">mp_sgd_update</span><span class="p">,</span> <span class="n">mp_sgd_mom_update</span><span class="p">,</span> <span class="n">square</span><span class="p">,</span> <span class="n">ftrl_update</span><span class="p">,</span> <span class="n">ftml_update</span><span class="p">,</span>
<span class="n">signsgd_update</span><span class="p">,</span> <span class="n">signum_update</span><span class="p">,</span> <span class="n">nag_mom_update</span><span class="p">,</span> <span class="n">mp_nag_mom_update</span><span class="p">,</span>
<span class="n">multi_sgd_update</span><span class="p">,</span> <span class="n">multi_sgd_mom_update</span><span class="p">,</span> <span class="n">multi_mp_sgd_update</span><span class="p">,</span>
<span class="n">multi_mp_sgd_mom_update</span><span class="p">,</span> <span class="n">preloaded_multi_sgd_update</span><span class="p">,</span>
<span class="n">preloaded_multi_sgd_mom_update</span><span class="p">,</span> <span class="n">preloaded_multi_mp_sgd_update</span><span class="p">,</span>
<span class="n">preloaded_multi_mp_sgd_mom_update</span><span class="p">,</span> <span class="n">lamb_update_phase1</span><span class="p">,</span> <span class="n">lamb_update_phase2</span><span class="p">,</span>
<span class="n">mp_lamb_update_phase1</span><span class="p">,</span> <span class="n">mp_lamb_update_phase2</span><span class="p">)</span>
<span class="kn">from</span> <span class="nn">..ndarray.contrib</span> <span class="kn">import</span> <span class="p">(</span><span class="n">multi_lamb_update</span><span class="p">,</span> <span class="n">multi_mp_lamb_update</span><span class="p">)</span>
<span class="kn">from</span> <span class="nn">..ndarray</span> <span class="kn">import</span> <span class="n">sparse</span>
<span class="kn">from</span> <span class="nn">..random</span> <span class="kn">import</span> <span class="n">normal</span>
<span class="kn">from</span> <span class="nn">..util</span> <span class="kn">import</span> <span class="n">is_np_array</span>
<span class="n">__all__</span> <span class="o">=</span> <span class="p">[</span>
<span class="s1">&#39;AdaDelta&#39;</span><span class="p">,</span> <span class="s1">&#39;AdaGrad&#39;</span><span class="p">,</span> <span class="s1">&#39;Adam&#39;</span><span class="p">,</span> <span class="s1">&#39;Adamax&#39;</span><span class="p">,</span> <span class="s1">&#39;DCASGD&#39;</span><span class="p">,</span> <span class="s1">&#39;FTML&#39;</span><span class="p">,</span> <span class="s1">&#39;Ftrl&#39;</span><span class="p">,</span> <span class="s1">&#39;LARS&#39;</span><span class="p">,</span> <span class="s1">&#39;LBSGD&#39;</span><span class="p">,</span>
<span class="s1">&#39;NAG&#39;</span><span class="p">,</span> <span class="s1">&#39;NDabs&#39;</span><span class="p">,</span> <span class="s1">&#39;Nadam&#39;</span><span class="p">,</span> <span class="s1">&#39;Optimizer&#39;</span><span class="p">,</span> <span class="s1">&#39;RMSProp&#39;</span><span class="p">,</span> <span class="s1">&#39;SGD&#39;</span><span class="p">,</span> <span class="s1">&#39;SGLD&#39;</span><span class="p">,</span> <span class="s1">&#39;Signum&#39;</span><span class="p">,</span> <span class="s1">&#39;LAMB&#39;</span><span class="p">,</span>
<span class="s1">&#39;Test&#39;</span><span class="p">,</span> <span class="s1">&#39;Updater&#39;</span><span class="p">,</span> <span class="s1">&#39;ccSGD&#39;</span><span class="p">,</span> <span class="s1">&#39;create&#39;</span><span class="p">,</span> <span class="s1">&#39;get_updater&#39;</span><span class="p">,</span> <span class="s1">&#39;register&#39;</span>
<span class="p">]</span>
<span class="k">def</span> <span class="nf">_flatten_list</span><span class="p">(</span><span class="n">nested_list</span><span class="p">):</span>
<span class="k">return</span> <span class="p">[</span><span class="n">item</span> <span class="k">for</span> <span class="n">sublist</span> <span class="ow">in</span> <span class="n">nested_list</span> <span class="k">for</span> <span class="n">item</span> <span class="ow">in</span> <span class="n">sublist</span><span class="p">]</span>
<div class="viewcode-block" id="Optimizer"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer">[docs]</a><span class="k">class</span> <span class="nc">Optimizer</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The base class inherited by all optimizers.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> rescale_grad : float, optional, default 1.0</span>
<span class="sd"> Multiply the gradient with `rescale_grad` before updating. Often</span>
<span class="sd"> choose to be ``1.0/batch_size``.</span>
<span class="sd"> param_idx2name : dict from int to string, optional, default None</span>
<span class="sd"> A dictionary that maps int index to string name.</span>
<span class="sd"> clip_gradient : float, optional, default None</span>
<span class="sd"> Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.</span>
<span class="sd"> learning_rate : float, optional, default None</span>
<span class="sd"> The initial learning rate. If None, the optimization will use the</span>
<span class="sd"> learning rate from ``lr_scheduler``. If not None, it will overwrite</span>
<span class="sd"> the learning rate in ``lr_scheduler``. If None and ``lr_scheduler``</span>
<span class="sd"> is also None, then it will be set to 0.01 by default.</span>
<span class="sd"> lr_scheduler : LRScheduler, optional, default None</span>
<span class="sd"> The learning rate scheduler.</span>
<span class="sd"> wd : float, optional, default 0.0</span>
<span class="sd"> The weight decay (or L2 regularization) coefficient. Modifies objective</span>
<span class="sd"> by adding a penalty for having large weights.</span>
<span class="sd"> sym: Symbol, optional, default None</span>
<span class="sd"> The Symbol this optimizer is applying to.</span>
<span class="sd"> begin_num_update : int, optional, default 0</span>
<span class="sd"> The initial number of updates.</span>
<span class="sd"> multi_precision : bool, optional, default False</span>
<span class="sd"> Flag to control the internal precision of the optimizer.</span>
<span class="sd"> False: results in using the same precision as the weights (default),</span>
<span class="sd"> True: makes internal 32-bit copy of the weights and applies gradients</span>
<span class="sd"> in 32-bit precision even if actual weights used in the model have lower precision.</span>
<span class="sd"> Turning this on can improve convergence and accuracy when training with float16.</span>
<span class="sd"> param_dict : dict of int -&gt; gluon.Parameter, default None</span>
<span class="sd"> Dictionary of parameter index to gluon.Parameter, used to lookup parameter attributes</span>
<span class="sd"> such as lr_mult, wd_mult, etc. param_dict shall not be deep copied.</span>
<span class="sd"> Properties</span>
<span class="sd"> ----------</span>
<span class="sd"> learning_rate : float</span>
<span class="sd"> The current learning rate of the optimizer. Given an Optimizer object</span>
<span class="sd"> optimizer, its learning rate can be accessed as optimizer.learning_rate.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rescale_grad</span><span class="o">=</span><span class="mf">1.</span><span class="p">,</span> <span class="n">param_idx2name</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="mf">0.</span><span class="p">,</span>
<span class="n">clip_gradient</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">lr_scheduler</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">sym</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">begin_num_update</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
<span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">param_dict</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span> <span class="o">=</span> <span class="n">rescale_grad</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="o">=</span> <span class="n">lr_scheduler</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">learning_rate</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">learning_rate</span> <span class="o">=</span> <span class="mf">0.01</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr</span> <span class="o">=</span> <span class="n">learning_rate</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">learning_rate</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span><span class="o">.</span><span class="n">base_lr</span> <span class="o">!=</span> <span class="n">learning_rate</span><span class="p">:</span>
<span class="nb">print</span><span class="p">(</span><span class="ne">UserWarning</span><span class="p">(</span><span class="s2">&quot;learning rate from ``lr_scheduler`` has been &quot;</span>
<span class="s2">&quot;overwritten by ``learning_rate`` in optimizer.&quot;</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span><span class="o">.</span><span class="n">base_lr</span> <span class="o">=</span> <span class="n">learning_rate</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd</span> <span class="o">=</span> <span class="n">wd</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span> <span class="o">=</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span> <span class="o">=</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">begin_num_update</span> <span class="o">=</span> <span class="n">begin_num_update</span>
<span class="bp">self</span><span class="o">.</span><span class="n">num_update</span> <span class="o">=</span> <span class="n">begin_num_update</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_all_index_update_counts</span> <span class="o">=</span> <span class="p">{</span><span class="mi">0</span> <span class="p">:</span> <span class="p">{}}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_all_index_update_counts</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="o">=</span> <span class="n">clip_gradient</span>
<span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="o">=</span> <span class="n">multi_precision</span>
<span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">if</span> <span class="n">param_idx2name</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">param_idx2name</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">assert</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">param_idx2name</span><span class="p">,</span> <span class="nb">dict</span><span class="p">),</span> \
<span class="s1">&#39;param_idx2name should be a dict of param indexes to names.&#39;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span> <span class="o">=</span> <span class="n">param_idx2name</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span> <span class="o">=</span> <span class="p">(</span><span class="n">sym</span><span class="o">.</span><span class="n">attr_dict</span><span class="p">(),</span> <span class="n">sym</span><span class="o">.</span><span class="n">list_arguments</span><span class="p">())</span> <span class="k">if</span> <span class="n">sym</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span> <span class="o">=</span> <span class="n">param_dict</span> <span class="k">if</span> <span class="n">param_dict</span> <span class="k">else</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">allow_np_array</span> <span class="o">=</span> <span class="n">is_np_array</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">set_lr_mult</span><span class="p">({})</span>
<span class="bp">self</span><span class="o">.</span><span class="n">set_wd_mult</span><span class="p">({})</span>
<span class="n">opt_registry</span> <span class="o">=</span> <span class="p">{}</span>
<div class="viewcode-block" id="Optimizer.register"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.register">[docs]</a> <span class="nd">@staticmethod</span>
<span class="k">def</span> <span class="nf">register</span><span class="p">(</span><span class="n">klass</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Registers a new optimizer.</span>
<span class="sd"> Once an optimizer is registered, we can create an instance of this</span>
<span class="sd"> optimizer with `create_optimizer` later.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; @mx.optimizer.Optimizer.register</span>
<span class="sd"> ... class MyOptimizer(mx.optimizer.Optimizer):</span>
<span class="sd"> ... pass</span>
<span class="sd"> &gt;&gt;&gt; optim = mx.optimizer.Optimizer.create_optimizer(&#39;MyOptimizer&#39;)</span>
<span class="sd"> &gt;&gt;&gt; print(type(optim))</span>
<span class="sd"> &lt;class &#39;__main__.MyOptimizer&#39;&gt;</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">klass</span><span class="p">,</span> <span class="nb">type</span><span class="p">))</span>
<span class="n">name</span> <span class="o">=</span> <span class="n">klass</span><span class="o">.</span><span class="vm">__name__</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span>
<span class="k">if</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">Optimizer</span><span class="o">.</span><span class="n">opt_registry</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s1">&#39;WARNING: New optimizer </span><span class="si">%s</span><span class="s1">.</span><span class="si">%s</span><span class="s1"> is overriding &#39;</span>
<span class="s1">&#39;existing optimizer </span><span class="si">%s</span><span class="s1">.</span><span class="si">%s</span><span class="s1">&#39;</span> <span class="o">%</span>
<span class="p">(</span><span class="n">klass</span><span class="o">.</span><span class="vm">__module__</span><span class="p">,</span> <span class="n">klass</span><span class="o">.</span><span class="vm">__name__</span><span class="p">,</span>
<span class="n">Optimizer</span><span class="o">.</span><span class="n">opt_registry</span><span class="p">[</span><span class="n">name</span><span class="p">]</span><span class="o">.</span><span class="vm">__module__</span><span class="p">,</span>
<span class="n">Optimizer</span><span class="o">.</span><span class="n">opt_registry</span><span class="p">[</span><span class="n">name</span><span class="p">]</span><span class="o">.</span><span class="vm">__name__</span><span class="p">))</span>
<span class="n">Optimizer</span><span class="o">.</span><span class="n">opt_registry</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="n">klass</span>
<span class="k">return</span> <span class="n">klass</span></div>
<div class="viewcode-block" id="Optimizer.create_optimizer"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.create_optimizer">[docs]</a> <span class="nd">@staticmethod</span>
<span class="k">def</span> <span class="nf">create_optimizer</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Instantiates an optimizer with a given name and kwargs.</span>
<span class="sd"> .. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> name: str</span>
<span class="sd"> Name of the optimizer. Should be the name</span>
<span class="sd"> of a subclass of Optimizer. Case insensitive.</span>
<span class="sd"> kwargs: dict</span>
<span class="sd"> Parameters for the optimizer.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> Optimizer</span>
<span class="sd"> An instantiated optimizer.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; sgd = mx.optimizer.Optimizer.create_optimizer(&#39;sgd&#39;)</span>
<span class="sd"> &gt;&gt;&gt; type(sgd)</span>
<span class="sd"> &lt;class &#39;mxnet.optimizer.SGD&#39;&gt;</span>
<span class="sd"> &gt;&gt;&gt; adam = mx.optimizer.create(&#39;adam&#39;, learning_rate=.1)</span>
<span class="sd"> &gt;&gt;&gt; type(adam)</span>
<span class="sd"> &lt;class &#39;mxnet.optimizer.Adam&#39;&gt;</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">name</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span> <span class="ow">in</span> <span class="n">Optimizer</span><span class="o">.</span><span class="n">opt_registry</span><span class="p">:</span>
<span class="k">return</span> <span class="n">Optimizer</span><span class="o">.</span><span class="n">opt_registry</span><span class="p">[</span><span class="n">name</span><span class="o">.</span><span class="n">lower</span><span class="p">()](</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;Cannot find optimizer </span><span class="si">%s</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="n">name</span><span class="p">)</span></div>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">learning_rate</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">num_update</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr</span>
<div class="viewcode-block" id="Optimizer.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Creates auxiliary state for a given weight.</span>
<span class="sd"> Some optimizers require additional states, e.g. as momentum, in addition</span>
<span class="sd"> to gradients in order to update weights. This function creates state</span>
<span class="sd"> for a given weight which will be used in `update`. This function is</span>
<span class="sd"> called only once for each weight.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int</span>
<span class="sd"> An unique index to identify the weight.</span>
<span class="sd"> weight : NDArray</span>
<span class="sd"> The weight.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> state : any obj</span>
<span class="sd"> The state associated with the weight.</span>
<span class="sd"> &quot;&quot;&quot;</span></div>
<div class="viewcode-block" id="Optimizer.create_state_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.create_state_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">create_state_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Creates auxiliary state for a given weight, including FP32 high</span>
<span class="sd"> precision copy if original weight is FP16.</span>
<span class="sd"> This method is provided to perform automatic mixed precision training</span>
<span class="sd"> for optimizers that do not support it themselves.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int</span>
<span class="sd"> An unique index to identify the weight.</span>
<span class="sd"> weight : NDArray</span>
<span class="sd"> The weight.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> state : any obj</span>
<span class="sd"> The state associated with the weight.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="k">return</span> <span class="p">(</span><span class="n">weight_master_copy</span><span class="p">,)</span> <span class="o">+</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight_master_copy</span><span class="p">),)</span>
<span class="k">if</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;Accumulating with float16 in optimizer can lead to &quot;</span>
<span class="s2">&quot;poor accuracy or slow convergence. &quot;</span>
<span class="s2">&quot;Consider using multi_precision=True option of the &quot;</span>
<span class="s2">&quot;optimizer&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span></div>
<div class="viewcode-block" id="Optimizer.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Updates the given parameter using the corresponding gradient and state.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int</span>
<span class="sd"> The unique index of the parameter into the individual learning</span>
<span class="sd"> rates and weight decays. Learning rates and weight decay</span>
<span class="sd"> may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.</span>
<span class="sd"> weight : NDArray</span>
<span class="sd"> The parameter to be updated.</span>
<span class="sd"> grad : NDArray</span>
<span class="sd"> The gradient of the objective with respect to this parameter.</span>
<span class="sd"> state : any obj</span>
<span class="sd"> The state returned by `create_state()`.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">raise</span> <span class="ne">NotImplementedError</span><span class="p">()</span></div>
<div class="viewcode-block" id="Optimizer.update_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.update_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">update_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Updates the given parameter using the corresponding gradient and state.</span>
<span class="sd"> Mixed precision version.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int</span>
<span class="sd"> The unique index of the parameter into the individual learning</span>
<span class="sd"> rates and weight decays. Learning rates and weight decay</span>
<span class="sd"> may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.</span>
<span class="sd"> weight : NDArray</span>
<span class="sd"> The parameter to be updated.</span>
<span class="sd"> grad : NDArray</span>
<span class="sd"> The gradient of the objective with respect to this parameter.</span>
<span class="sd"> state : any obj</span>
<span class="sd"> The state returned by `create_state()`.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
<span class="c1"># Wrapper for mixed precision</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="n">original_state</span> <span class="o">=</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="n">grad32</span> <span class="o">=</span> <span class="n">grad</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight_master_copy</span><span class="p">,</span> <span class="n">grad32</span><span class="p">,</span> <span class="n">original_state</span><span class="p">)</span>
<span class="n">cast</span><span class="p">(</span><span class="n">weight_master_copy</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">)</span></div>
<div class="viewcode-block" id="Optimizer.set_learning_rate"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.set_learning_rate">[docs]</a> <span class="k">def</span> <span class="nf">set_learning_rate</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lr</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Sets a new learning rate of the optimizer.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> lr : float</span>
<span class="sd"> The new learning rate of the optimizer.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> <span class="c1"># pylint: disable=no-else-raise</span>
<span class="k">raise</span> <span class="ne">UserWarning</span><span class="p">(</span><span class="s2">&quot;LRScheduler of the optimizer has already been &quot;</span>
<span class="s2">&quot;defined. Note that set_learning_rate can mutate &quot;</span>
<span class="s2">&quot;the value of the learning rate of the optimizer &quot;</span>
<span class="s2">&quot;only when the LRScheduler of the optimizer is &quot;</span>
<span class="s2">&quot;undefined.&quot;</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr</span> <span class="o">=</span> <span class="n">lr</span></div>
<div class="viewcode-block" id="Optimizer.set_lr_scale"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.set_lr_scale">[docs]</a> <span class="k">def</span> <span class="nf">set_lr_scale</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">args_lrscale</span><span class="p">):</span> <span class="c1"># pylint: disable=unused-argument</span>
<span class="sd">&quot;&quot;&quot;[DEPRECATED] Sets lr scale. Use set_lr_mult instead.&quot;&quot;&quot;</span>
<span class="k">raise</span> <span class="ne">DeprecationWarning</span></div>
<div class="viewcode-block" id="Optimizer.set_lr_mult"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.set_lr_mult">[docs]</a> <span class="k">def</span> <span class="nf">set_lr_mult</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">args_lr_mult</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Sets an individual learning rate multiplier for each parameter.</span>
<span class="sd"> If you specify a learning rate multiplier for a parameter, then</span>
<span class="sd"> the learning rate for the parameter will be set as the product of</span>
<span class="sd"> the global learning rate `self.lr` and its multiplier.</span>
<span class="sd"> .. note:: The default learning rate multiplier of a `Variable`</span>
<span class="sd"> can be set with `lr_mult` argument in the constructor.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> args_lr_mult : dict of str/int to float</span>
<span class="sd"> For each of its key-value entries, the learning rate multipler for the</span>
<span class="sd"> parameter specified in the key will be set as the given value.</span>
<span class="sd"> You can specify the parameter with either its name or its index.</span>
<span class="sd"> If you use the name, you should pass `sym` in the constructor,</span>
<span class="sd"> and the name you specified in the key of `args_lr_mult` should match</span>
<span class="sd"> the name of the parameter in `sym`. If you use the index, it should</span>
<span class="sd"> correspond to the index of the parameter used in the `update` method.</span>
<span class="sd"> Specifying a parameter by its index is only supported for backward</span>
<span class="sd"> compatibility, and we recommend to use the name instead.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span><span class="p">:</span>
<span class="n">attr</span><span class="p">,</span> <span class="n">arg_names</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span>
<span class="k">for</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">arg_names</span><span class="p">:</span>
<span class="k">if</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">attr</span> <span class="ow">and</span> <span class="s1">&#39;__lr_mult__&#39;</span> <span class="ow">in</span> <span class="n">attr</span><span class="p">[</span><span class="n">name</span><span class="p">]:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="nb">float</span><span class="p">(</span><span class="n">attr</span><span class="p">[</span><span class="n">name</span><span class="p">][</span><span class="s1">&#39;__lr_mult__&#39;</span><span class="p">])</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">args_lr_mult</span><span class="p">)</span></div>
<div class="viewcode-block" id="Optimizer.set_wd_mult"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Optimizer.set_wd_mult">[docs]</a> <span class="k">def</span> <span class="nf">set_wd_mult</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">args_wd_mult</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Sets an individual weight decay multiplier for each parameter.</span>
<span class="sd"> By default, if `param_idx2name` was provided in the</span>
<span class="sd"> constructor, the weight decay multipler is set as 0 for all</span>
<span class="sd"> parameters whose name don&#39;t end with ``_weight`` or</span>
<span class="sd"> ``_gamma``.</span>
<span class="sd"> .. note:: The default weight decay multiplier for a `Variable`</span>
<span class="sd"> can be set with its `wd_mult` argument in the constructor.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> args_wd_mult : dict of string/int to float</span>
<span class="sd"> For each of its key-value entries, the weight decay multipler for the</span>
<span class="sd"> parameter specified in the key will be set as the given value.</span>
<span class="sd"> You can specify the parameter with either its name or its index.</span>
<span class="sd"> If you use the name, you should pass `sym` in the constructor,</span>
<span class="sd"> and the name you specified in the key of `args_lr_mult` should match</span>
<span class="sd"> the name of the parameter in `sym`. If you use the index, it should</span>
<span class="sd"> correspond to the index of the parameter used in the `update` method.</span>
<span class="sd"> Specifying a parameter by its index is only supported for backward</span>
<span class="sd"> compatibility, and we recommend to use the name instead.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">for</span> <span class="n">n</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="o">.</span><span class="n">values</span><span class="p">():</span>
<span class="k">if</span> <span class="ow">not</span> <span class="p">(</span><span class="n">n</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;_weight&#39;</span><span class="p">)</span> <span class="ow">or</span> <span class="n">n</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;_gamma&#39;</span><span class="p">)):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="p">[</span><span class="n">n</span><span class="p">]</span> <span class="o">=</span> <span class="mf">0.0</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span><span class="p">:</span>
<span class="n">attr</span><span class="p">,</span> <span class="n">arg_names</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span>
<span class="k">for</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">arg_names</span><span class="p">:</span>
<span class="k">if</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">attr</span> <span class="ow">and</span> <span class="s1">&#39;__wd_mult__&#39;</span> <span class="ow">in</span> <span class="n">attr</span><span class="p">[</span><span class="n">name</span><span class="p">]:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="nb">float</span><span class="p">(</span><span class="n">attr</span><span class="p">[</span><span class="n">name</span><span class="p">][</span><span class="s1">&#39;__wd_mult__&#39;</span><span class="p">])</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">args_wd_mult</span><span class="p">)</span></div>
<span class="k">def</span> <span class="nf">_set_current_context</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">device_id</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Sets the number of the currently handled device.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> device_id : int</span>
<span class="sd"> The number of current device.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">device_id</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">_all_index_update_counts</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_all_index_update_counts</span><span class="p">[</span><span class="n">device_id</span><span class="p">]</span> <span class="o">=</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_all_index_update_counts</span><span class="p">[</span><span class="n">device_id</span><span class="p">]</span>
<span class="k">def</span> <span class="nf">_update_count</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Updates num_update.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int or list of int</span>
<span class="sd"> The index to be updated.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
<span class="n">index</span> <span class="o">=</span> <span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="n">index</span><span class="p">:</span>
<span class="k">if</span> <span class="n">idx</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">idx</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">begin_num_update</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">idx</span><span class="p">]</span> <span class="o">+=</span> <span class="mi">1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">num_update</span> <span class="o">=</span> <span class="nb">max</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">idx</span><span class="p">],</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_update</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">_get_lrs</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Gets the learning rates given the indices of the weights.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> indices : list of int</span>
<span class="sd"> Indices corresponding to weights.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> lrs : list of float</span>
<span class="sd"> Learning rates for those indices.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">num_update</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr</span>
<span class="n">lrs</span> <span class="o">=</span> <span class="p">[</span><span class="n">lr</span> <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">]</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">index</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
<span class="k">if</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span><span class="p">:</span>
<span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span><span class="p">[</span><span class="n">index</span><span class="p">]</span><span class="o">.</span><span class="n">lr_mult</span>
<span class="k">elif</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="p">:</span>
<span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="k">elif</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">:</span>
<span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">[</span><span class="n">index</span><span class="p">],</span> <span class="mf">1.0</span><span class="p">)</span>
<span class="k">return</span> <span class="n">lrs</span>
<span class="k">def</span> <span class="nf">_get_lr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Gets the learning rate given the index of the weight.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int</span>
<span class="sd"> The index corresponding to the weight.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> lr : float</span>
<span class="sd"> Learning rate for this index.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lrs</span><span class="p">([</span><span class="n">index</span><span class="p">])[</span><span class="mi">0</span><span class="p">]</span>
<span class="k">def</span> <span class="nf">_get_wds</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Gets weight decays for indices.</span>
<span class="sd"> Returns 0 for non-weights if the name of weights are provided for `__init__`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> indices : list of int</span>
<span class="sd"> Indices of weights.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> wds : list of float</span>
<span class="sd"> Weight decays for those indices.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">wds</span> <span class="o">=</span> <span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">wd</span> <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">]</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">index</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
<span class="k">if</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span><span class="p">:</span>
<span class="n">wds</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span><span class="p">[</span><span class="n">index</span><span class="p">]</span><span class="o">.</span><span class="n">wd_mult</span>
<span class="k">elif</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="p">:</span>
<span class="n">wds</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="k">elif</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">:</span>
<span class="n">wds</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">[</span><span class="n">index</span><span class="p">],</span> <span class="mf">1.0</span><span class="p">)</span>
<span class="k">return</span> <span class="n">wds</span>
<span class="k">def</span> <span class="nf">_get_wd</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Gets weight decay for index.</span>
<span class="sd"> Returns 0 for non-weights if the name of weights are provided for `__init__`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> index : int</span>
<span class="sd"> The index of weight.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> wd : float</span>
<span class="sd"> Weight decay for this index.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wds</span><span class="p">([</span><span class="n">index</span><span class="p">])[</span><span class="mi">0</span><span class="p">]</span>
<span class="k">def</span> <span class="nf">__getstate__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="n">ret</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="vm">__dict__</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
<span class="c1"># do not include param_dict in the state</span>
<span class="k">del</span> <span class="n">ret</span><span class="p">[</span><span class="s1">&#39;param_dict&#39;</span><span class="p">]</span>
<span class="k">return</span> <span class="n">ret</span>
<span class="k">def</span> <span class="nf">__setstate__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="vm">__dict__</span> <span class="o">=</span> <span class="n">state</span>
<span class="c1"># param_dict needs to be explicitly set by the trainer</span>
<span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span> <span class="o">=</span> <span class="p">{}</span></div>
<span class="c1"># convenience wrapper for Optimizer.Register</span>
<span class="n">register</span> <span class="o">=</span> <span class="n">Optimizer</span><span class="o">.</span><span class="n">register</span> <span class="c1"># pylint: disable=invalid-name</span>
<span class="c1"># pylint: disable=line-too-long</span>
<div class="viewcode-block" id="SGD"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGD">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">SGD</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The SGD optimizer with momentum and weight decay.</span>
<span class="sd"> If the storage types of grad is ``row_sparse`` and ``lazy_update`` is True, \</span>
<span class="sd"> **lazy updates** are applied by::</span>
<span class="sd"> for row in grad.indices:</span>
<span class="sd"> rescaled_grad[row] = lr * (rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row])</span>
<span class="sd"> state[row] = momentum[row] * state[row] + rescaled_grad[row]</span>
<span class="sd"> weight[row] = weight[row] - state[row]</span>
<span class="sd"> The sparse update only updates the momentum for the weights whose row_sparse</span>
<span class="sd"> gradient indices appear in the current batch, rather than updating it for all</span>
<span class="sd"> indices. Compared with the original update, it can provide large</span>
<span class="sd"> improvements in model training throughput for some applications. However, it</span>
<span class="sd"> provides slightly different semantics than the original update, and</span>
<span class="sd"> may lead to different empirical results.</span>
<span class="sd"> In the case when ``update_on_kvstore`` is set to False (either globally via</span>
<span class="sd"> MXNET_UPDATE_ON_KVSTORE=0 environment variable or as a parameter in</span>
<span class="sd"> :class:`~mxnet.gluon.Trainer`) SGD optimizer can perform aggregated update</span>
<span class="sd"> of parameters, which may lead to improved performance. The aggregation size</span>
<span class="sd"> is controlled by MXNET_OPTIMIZER_AGGREGATION_SIZE environment variable and</span>
<span class="sd"> defaults to 4.</span>
<span class="sd"> Otherwise, **standard updates** are applied by::</span>
<span class="sd"> rescaled_grad = lr * (rescale_grad * clip(grad, clip_gradient) + wd * weight)</span>
<span class="sd"> state = momentum * state + rescaled_grad</span>
<span class="sd"> weight = weight - state</span>
<span class="sd"> For details of the update algorithm see</span>
<span class="sd"> :class:`~mxnet.ndarray.sgd_update` and :class:`~mxnet.ndarray.sgd_mom_update`.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> momentum : float, optional</span>
<span class="sd"> The momentum value.</span>
<span class="sd"> lazy_update : bool, optional</span>
<span class="sd"> Default is True. If True, lazy updates are applied \</span>
<span class="sd"> if the storage types of weight and grad are both ``row_sparse``.</span>
<span class="sd"> multi_precision: bool, optional</span>
<span class="sd"> Flag to control the internal precision of the optimizer.</span>
<span class="sd"> False: results in using the same precision as the weights (default),</span>
<span class="sd"> True: makes internal 32-bit copy of the weights and applies gradients</span>
<span class="sd"> in 32-bit precision even if actual weights used in the model have lower precision.</span>
<span class="sd"> Turning this on can improve convergence and accuracy when training with float16.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">lazy_update</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">SGD</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span> <span class="o">=</span> <span class="n">lazy_update</span>
<span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s1">&#39;MXNET_OPTIMIZER_AGGREGATION_SIZE&#39;</span><span class="p">,</span> <span class="s2">&quot;4&quot;</span><span class="p">))</span>
<div class="viewcode-block" id="SGD.create_state_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGD.create_state_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">create_state_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="k">return</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight_master_copy</span><span class="p">),</span> <span class="n">weight_master_copy</span><span class="p">)</span>
<span class="k">if</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;Accumulating with float16 in optimizer can lead to &quot;</span>
<span class="s2">&quot;poor accuracy or slow convergence. &quot;</span>
<span class="s2">&quot;Consider using multi_precision=True option of the &quot;</span>
<span class="s2">&quot;SGD optimizer&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span></div>
<div class="viewcode-block" id="SGD.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGD.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">!=</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">stype</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">stype</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span> <span class="k">else</span> <span class="s1">&#39;default&#39;</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">stype</span><span class="p">)</span>
<span class="k">return</span> <span class="n">momentum</span></div>
<span class="k">def</span> <span class="nf">_update_impl</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">states</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="n">aggregate</span> <span class="o">=</span> <span class="kc">True</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="n">indices</span> <span class="o">=</span> <span class="p">[</span><span class="n">indices</span><span class="p">]</span>
<span class="n">weights</span> <span class="o">=</span> <span class="p">[</span><span class="n">weights</span><span class="p">]</span>
<span class="n">grads</span> <span class="o">=</span> <span class="p">[</span><span class="n">grads</span><span class="p">]</span>
<span class="n">states</span> <span class="o">=</span> <span class="p">[</span><span class="n">states</span><span class="p">]</span>
<span class="k">for</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="n">aggregate</span> <span class="o">=</span> <span class="p">(</span><span class="n">aggregate</span> <span class="ow">and</span>
<span class="n">weight</span><span class="o">.</span><span class="n">stype</span> <span class="o">==</span> <span class="s1">&#39;default&#39;</span> <span class="ow">and</span>
<span class="n">grad</span><span class="o">.</span><span class="n">stype</span> <span class="o">==</span> <span class="s1">&#39;default&#39;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">lrs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lrs</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">wds</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wds</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;momentum&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="n">aggregate</span><span class="p">:</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">multi_sgd_mom_update</span><span class="p">(</span><span class="o">*</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">states</span><span class="p">)),</span> <span class="n">out</span><span class="o">=</span><span class="n">weights</span><span class="p">,</span>
<span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">weights</span><span class="p">),</span> <span class="n">lrs</span><span class="o">=</span><span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="o">=</span><span class="n">wds</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">multi_sgd_update</span><span class="p">(</span><span class="o">*</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">)),</span> <span class="n">out</span><span class="o">=</span><span class="n">weights</span><span class="p">,</span>
<span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">weights</span><span class="p">),</span> <span class="n">lrs</span><span class="o">=</span><span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="o">=</span><span class="n">wds</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">multi_mp_sgd_mom_update</span><span class="p">(</span><span class="o">*</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="o">*</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">states</span><span class="p">))),</span>
<span class="n">out</span><span class="o">=</span><span class="n">weights</span><span class="p">,</span> <span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">weights</span><span class="p">),</span>
<span class="n">lrs</span><span class="o">=</span><span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="o">=</span><span class="n">wds</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">multi_mp_sgd_update</span><span class="p">(</span><span class="o">*</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span>
<span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">states</span><span class="p">))[</span><span class="mi">1</span><span class="p">])),</span>
<span class="n">out</span><span class="o">=</span><span class="n">weights</span><span class="p">,</span> <span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">weights</span><span class="p">),</span>
<span class="n">lrs</span><span class="o">=</span><span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="o">=</span><span class="n">wds</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">for</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">lr</span><span class="p">,</span> <span class="n">wd</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">states</span><span class="p">,</span> <span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">sgd_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lazy_update</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lazy_update</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">mp_sgd_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mp_sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<div class="viewcode-block" id="SGD.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGD.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span></div>
<div class="viewcode-block" id="SGD.update_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGD.update_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">update_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span>
<span class="n">multi_precision</span><span class="o">=</span><span class="n">use_multi_precision</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="Signum"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Signum">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">Signum</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">&quot;&quot;&quot;The Signum optimizer that takes the sign of gradient or momentum.</span>
<span class="sd"> The optimizer updates the weight by::</span>
<span class="sd"> rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight</span>
<span class="sd"> state = momentum * state + (1-momentum)*rescaled_grad</span>
<span class="sd"> weight = (1 - lr * wd_lh) * weight - lr * sign(state)</span>
<span class="sd"> References</span>
<span class="sd"> ----------</span>
<span class="sd"> Jeremy Bernstein, Yu-Xiang Wang, Kamyar Azizzadenesheli &amp; Anima Anandkumar. (2018).</span>
<span class="sd"> signSGD: Compressed Optimisation for Non-Convex Problems. In ICML&#39;18.</span>
<span class="sd"> See: https://arxiv.org/abs/1802.04434</span>
<span class="sd"> For details of the update algorithm see</span>
<span class="sd"> :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> momentum : float, optional</span>
<span class="sd"> The momentum value.</span>
<span class="sd"> wd_lh : float, optional</span>
<span class="sd"> The amount of decoupled weight decay regularization, see details in the original paper at:\</span>
<span class="sd"> https://arxiv.org/abs/1711.05101</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.01</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">wd_lh</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">Signum</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">learning_rate</span><span class="o">=</span><span class="n">learning_rate</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_lh</span> <span class="o">=</span> <span class="n">wd_lh</span>
<div class="viewcode-block" id="Signum.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Signum.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">!=</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">)</span>
<span class="k">return</span> <span class="n">momentum</span></div>
<span class="k">def</span> <span class="nf">_update_impl</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;momentum&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">wd_lh</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;wd_lh&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">wd_lh</span>
<span class="k">if</span> <span class="n">state</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">signum_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">signsgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<div class="viewcode-block" id="Signum.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Signum.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="FTML"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.FTML">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">FTML</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The FTML optimizer.</span>
<span class="sd"> This class implements the optimizer described in</span>
<span class="sd"> *FTML - Follow the Moving Leader in Deep Learning*,</span>
<span class="sd"> available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf.</span>
<span class="sd"> Denote time step by t. The optimizer updates the weight by::</span>
<span class="sd"> rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)</span>
<span class="sd"> v = beta2 * v + (1 - beta2) * square(rescaled_grad)</span>
<span class="sd"> d_t = (1 - power(beta1, t)) / lr * square_root(v / (1 - power(beta2, t))) + epsilon)</span>
<span class="sd"> z = beta1 * z + (1 - beta1) * rescaled_grad - (d_t - beta1 * d_(t-1)) * weight</span>
<span class="sd"> weight = - z / d_t</span>
<span class="sd"> For details of the update algorithm, see :class:`~mxnet.ndarray.ftml_update`.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> beta1 : float, optional</span>
<span class="sd"> 0 &lt; beta1 &lt; 1. Generally close to 0.5.</span>
<span class="sd"> beta2 : float, optional</span>
<span class="sd"> 0 &lt; beta2 &lt; 1. Generally close to 1.</span>
<span class="sd"> epsilon : float, optional</span>
<span class="sd"> Small value to avoid division by 0.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">beta1</span><span class="o">=</span><span class="mf">0.6</span><span class="p">,</span> <span class="n">beta2</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-8</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">FTML</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">=</span> <span class="n">beta1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta2</span> <span class="o">=</span> <span class="n">beta2</span>
<span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
<div class="viewcode-block" id="FTML.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.FTML.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="c1"># d_0</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="c1"># v_0</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">))</span> <span class="c1"># z_0</span></div>
<div class="viewcode-block" id="FTML.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.FTML.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">t</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;beta1&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="p">,</span> <span class="s1">&#39;beta2&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta2</span><span class="p">,</span> <span class="s1">&#39;epsilon&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">,</span>
<span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">,</span> <span class="s1">&#39;t&#39;</span><span class="p">:</span> <span class="n">t</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_grad&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="n">prev_d</span><span class="p">,</span> <span class="n">prev_v</span><span class="p">,</span> <span class="n">prev_z</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">ftml_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">prev_d</span><span class="p">,</span> <span class="n">prev_v</span><span class="p">,</span> <span class="n">prev_z</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="LARS"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LARS">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">LARS</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;the LARS optimizer from &#39;Large Batch Training of Convolution Networks&#39; \</span>
<span class="sd"> (https://arxiv.org/abs/1708.03888)</span>
<span class="sd"> Behave mostly like SGD with momentum and weight decay but is scaling \</span>
<span class="sd"> adaptively the learning for each layer (except bias and batch norm parameters):</span>
<span class="sd"> w_norm = L2norm(weights)</span>
<span class="sd"> g_norm = L2norm(gradients)</span>
<span class="sd"> if w_norm &gt; 0 and g_norm &gt; 0:</span>
<span class="sd"> lr_layer = lr * lr_mult * eta * w_norm / (g_norm + weight_decay * w_norm + eps)</span>
<span class="sd"> else:</span>
<span class="sd"> lr_layer = lr * lr_mult</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> momentum : float, optional</span>
<span class="sd"> The momentum value.</span>
<span class="sd"> lazy_update : bool, optional</span>
<span class="sd"> Default is True. If True, lazy updates are applied \</span>
<span class="sd"> if the storage types of weight and grad are both ``row_sparse``.</span>
<span class="sd"> lars_eta : float, optional</span>
<span class="sd"> LARS coefficient used to scale the learning rate. Default set to 0.001.</span>
<span class="sd"> lars_epsilon : float, optional</span>
<span class="sd"> Optional epsilon in case of very small gradients. Default set to 0.</span>
<span class="sd"> momentum_correction : bool, optional</span>
<span class="sd"> If True scale momentum w.r.t global learning rate change (with an lr_scheduler) \</span>
<span class="sd"> as indicated in &#39;Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour` \</span>
<span class="sd"> (https://arxiv.org/pdf/1706.02677.pdf)</span>
<span class="sd"> Default set to True.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">lazy_update</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">eta</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
<span class="n">momentum_correction</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">LARS</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum_correction</span> <span class="o">=</span> <span class="n">momentum_correction</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span> <span class="o">=</span> <span class="n">lazy_update</span>
<span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s1">&#39;MXNET_OPTIMIZER_AGGREGATION_SIZE&#39;</span><span class="p">,</span> <span class="s2">&quot;4&quot;</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">eta</span> <span class="o">=</span> <span class="n">eta</span>
<span class="bp">self</span><span class="o">.</span><span class="n">eps</span> <span class="o">=</span> <span class="n">eps</span>
<span class="bp">self</span><span class="o">.</span><span class="n">skip</span> <span class="o">=</span> <span class="mi">0</span>
<span class="bp">self</span><span class="o">.</span><span class="n">last_lr</span> <span class="o">=</span> <span class="kc">None</span>
<span class="bp">self</span><span class="o">.</span><span class="n">cur_lr</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">def</span> <span class="nf">_get_lrs</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Gets the learning rates given the indices of the weights.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> indices : list of int</span>
<span class="sd"> Indices corresponding to weights.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> lrs : list of float</span>
<span class="sd"> Learning rates for those indices.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">cur_lr</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">last_lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">cur_lr</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_scheduler</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">num_update</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">cur_lr</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">last_lr</span> <span class="o">=</span> <span class="n">lr</span>
<span class="bp">self</span><span class="o">.</span><span class="n">cur_lr</span> <span class="o">=</span> <span class="n">lr</span>
<span class="n">lrs</span> <span class="o">=</span> <span class="p">[</span><span class="n">lr</span> <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">]</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">index</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
<span class="k">if</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span><span class="p">:</span>
<span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">param_dict</span><span class="p">[</span><span class="n">index</span><span class="p">]</span><span class="o">.</span><span class="n">lr_mult</span>
<span class="k">elif</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="p">:</span>
<span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="k">elif</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">:</span>
<span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lr_mult</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">[</span><span class="n">index</span><span class="p">],</span> <span class="mf">1.0</span><span class="p">)</span>
<span class="k">return</span> <span class="n">lrs</span>
<div class="viewcode-block" id="LARS.set_wd_mult"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LARS.set_wd_mult">[docs]</a> <span class="k">def</span> <span class="nf">set_wd_mult</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">args_wd_mult</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">for</span> <span class="n">n</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="o">.</span><span class="n">values</span><span class="p">():</span>
<span class="n">is_weight</span> <span class="o">=</span> <span class="n">n</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;_weight&#39;</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">is_weight</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="p">[</span><span class="n">n</span><span class="p">]</span> <span class="o">=</span> <span class="mf">0.0</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span><span class="p">:</span>
<span class="n">attr</span><span class="p">,</span> <span class="n">arg_names</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">sym_info</span>
<span class="k">for</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">arg_names</span><span class="p">:</span>
<span class="k">if</span> <span class="n">name</span> <span class="ow">in</span> <span class="n">attr</span> <span class="ow">and</span> <span class="s1">&#39;__wd_mult__&#39;</span> <span class="ow">in</span> <span class="n">attr</span><span class="p">[</span><span class="n">name</span><span class="p">]:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="nb">float</span><span class="p">(</span><span class="n">attr</span><span class="p">[</span><span class="n">name</span><span class="p">][</span><span class="s1">&#39;__wd_mult__&#39;</span><span class="p">])</span>
<span class="bp">self</span><span class="o">.</span><span class="n">wd_mult</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">args_wd_mult</span><span class="p">)</span></div>
<div class="viewcode-block" id="LARS.create_state_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LARS.create_state_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">create_state_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="k">return</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight_master_copy</span><span class="p">),</span> <span class="n">weight_master_copy</span><span class="p">)</span>
<span class="k">if</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;Accumulating with float16 in optimizer can lead to &quot;</span>
<span class="s2">&quot;poor accuracy or slow convergence. &quot;</span>
<span class="s2">&quot;Consider using multi_precision=True option of the &quot;</span>
<span class="s2">&quot;SGD optimizer&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span></div>
<div class="viewcode-block" id="LARS.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LARS.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">!=</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">stype</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">stype</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span> <span class="k">else</span> <span class="s1">&#39;default&#39;</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">stype</span><span class="p">)</span>
<span class="k">return</span> <span class="n">momentum</span></div>
<span class="k">def</span> <span class="nf">_l2norm</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="n">rescale</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;L2 Norm implementation&quot;&quot;&quot;</span>
<span class="n">v</span> <span class="o">=</span> <span class="n">v</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="s1">&#39;float32&#39;</span><span class="p">)</span>
<span class="k">if</span> <span class="n">rescale</span><span class="p">:</span>
<span class="n">v</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span>
<span class="n">norm</span> <span class="o">=</span> <span class="n">NDnorm</span><span class="p">(</span><span class="n">v</span><span class="p">)</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()[</span><span class="mi">0</span><span class="p">]</span>
<span class="k">return</span> <span class="n">norm</span>
<span class="k">def</span> <span class="nf">_get_lars</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">i</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Returns a scaling factor for the learning rate for this layer&quot;&quot;&quot;</span>
<span class="n">name</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">if</span> <span class="n">i</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span> <span class="k">else</span> <span class="nb">str</span><span class="p">(</span><span class="n">i</span><span class="p">)</span>
<span class="k">if</span> <span class="n">name</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;gamma&#39;</span><span class="p">)</span> <span class="ow">or</span> <span class="n">name</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;beta&#39;</span><span class="p">)</span> <span class="ow">or</span> <span class="n">name</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;bias&#39;</span><span class="p">):</span>
<span class="k">return</span> <span class="n">lr</span>
<span class="n">w_norm</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_l2norm</span><span class="p">(</span><span class="n">weight</span><span class="p">)</span>
<span class="n">g_norm</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_l2norm</span><span class="p">(</span><span class="n">g</span><span class="p">,</span> <span class="n">rescale</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">if</span> <span class="n">w_norm</span> <span class="o">&gt;</span> <span class="mf">0.0</span> <span class="ow">and</span> <span class="n">g_norm</span> <span class="o">&gt;</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">lars</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">eta</span> <span class="o">*</span> <span class="n">w_norm</span><span class="o">/</span><span class="p">(</span><span class="n">g_norm</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">w_norm</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">eps</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">lars</span> <span class="o">=</span> <span class="mf">1.0</span>
<span class="k">return</span> <span class="n">lars</span> <span class="o">*</span> <span class="n">lr</span>
<span class="k">def</span> <span class="nf">_update_impl</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">indices</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">states</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="n">aggregate</span> <span class="o">=</span> <span class="kc">True</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="n">indices</span> <span class="o">=</span> <span class="p">[</span><span class="n">indices</span><span class="p">]</span>
<span class="n">weights</span> <span class="o">=</span> <span class="p">[</span><span class="n">weights</span><span class="p">]</span>
<span class="n">grads</span> <span class="o">=</span> <span class="p">[</span><span class="n">grads</span><span class="p">]</span>
<span class="n">states</span> <span class="o">=</span> <span class="p">[</span><span class="n">states</span><span class="p">]</span>
<span class="k">for</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="n">aggregate</span> <span class="o">=</span> <span class="p">(</span><span class="n">aggregate</span> <span class="ow">and</span>
<span class="n">weight</span><span class="o">.</span><span class="n">stype</span> <span class="o">==</span> <span class="s1">&#39;default&#39;</span> <span class="ow">and</span>
<span class="n">grad</span><span class="o">.</span><span class="n">stype</span> <span class="o">==</span> <span class="s1">&#39;default&#39;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">lrs</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lrs</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">wds</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wds</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;momentum&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">*</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">cur_lr</span> <span class="o">/</span> <span class="bp">self</span><span class="o">.</span><span class="n">last_lr</span><span class="p">))</span> \
<span class="k">if</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">momentum_correction</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">last_lr</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">)</span> <span class="k">else</span> \
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="n">aggregate</span><span class="p">:</span>
<span class="n">nb_params</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span>
<span class="n">names</span> <span class="o">=</span> <span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">if</span> <span class="n">i</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">idx2name</span> <span class="k">else</span> <span class="nb">str</span><span class="p">(</span><span class="n">i</span><span class="p">)</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">indices</span><span class="p">]</span>
<span class="n">lars_idx</span> <span class="o">=</span> <span class="p">[</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">nb_params</span><span class="p">)</span> <span class="k">if</span>
<span class="ow">not</span><span class="p">(</span><span class="n">names</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;gamma&#39;</span><span class="p">)</span> <span class="ow">or</span> <span class="n">names</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;beta&#39;</span><span class="p">)</span> <span class="ow">or</span>
<span class="n">names</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;bias&#39;</span><span class="p">))]</span>
<span class="n">nb_lars</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">lars_idx</span><span class="p">)</span>
<span class="n">no_lars_idx</span> <span class="o">=</span> <span class="p">[</span><span class="n">i</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">nb_params</span><span class="p">)</span> <span class="k">if</span>
<span class="p">(</span><span class="n">names</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;gamma&#39;</span><span class="p">)</span> <span class="ow">or</span> <span class="n">names</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;beta&#39;</span><span class="p">)</span> <span class="ow">or</span>
<span class="n">names</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">endswith</span><span class="p">(</span><span class="s1">&#39;bias&#39;</span><span class="p">))]</span>
<span class="n">cur_ctx</span> <span class="o">=</span> <span class="n">weights</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">context</span>
<span class="n">full_idx</span> <span class="o">=</span> <span class="n">lars_idx</span> <span class="o">+</span> <span class="n">no_lars_idx</span>
<span class="n">new_lrs</span> <span class="o">=</span> <span class="n">array</span><span class="p">([</span><span class="n">lrs</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">full_idx</span><span class="p">],</span> <span class="n">ctx</span><span class="o">=</span><span class="n">cur_ctx</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">&#39;float32&#39;</span><span class="p">)</span>
<span class="n">new_wds</span> <span class="o">=</span> <span class="n">array</span><span class="p">([</span><span class="n">wds</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">full_idx</span><span class="p">],</span> <span class="n">ctx</span><span class="o">=</span><span class="n">cur_ctx</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="s1">&#39;float32&#39;</span><span class="p">)</span>
<span class="n">new_weights</span> <span class="o">=</span> <span class="p">[</span><span class="n">weights</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">full_idx</span><span class="p">]</span>
<span class="n">new_grads</span> <span class="o">=</span> <span class="p">[</span><span class="n">grads</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">full_idx</span><span class="p">]</span>
<span class="n">new_states</span> <span class="o">=</span> <span class="p">[</span><span class="n">states</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">full_idx</span><span class="p">]</span>
<span class="k">if</span> <span class="n">nb_lars</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">w_sum_sq</span> <span class="o">=</span> <span class="n">multi_sum_sq</span><span class="p">(</span><span class="o">*</span><span class="n">new_weights</span><span class="p">[:</span><span class="n">nb_lars</span><span class="p">],</span> <span class="n">num_arrays</span><span class="o">=</span><span class="n">nb_lars</span><span class="p">)</span>
<span class="n">g_sum_sq</span> <span class="o">=</span> <span class="n">multi_sum_sq</span><span class="p">(</span><span class="o">*</span><span class="n">new_grads</span><span class="p">[:</span><span class="n">nb_lars</span><span class="p">],</span> <span class="n">num_arrays</span><span class="o">=</span><span class="n">nb_lars</span><span class="p">)</span>
<span class="n">multi_lars</span><span class="p">(</span><span class="n">new_lrs</span><span class="p">[:</span><span class="n">nb_lars</span><span class="p">],</span> <span class="n">w_sum_sq</span><span class="p">,</span> <span class="n">g_sum_sq</span><span class="p">,</span> <span class="n">new_wds</span><span class="p">[:</span><span class="n">nb_lars</span><span class="p">],</span>
<span class="n">eta</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">eta</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">eps</span><span class="p">,</span> <span class="n">rescale_grad</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">,</span>
<span class="n">out</span><span class="o">=</span><span class="n">new_lrs</span><span class="p">[:</span><span class="n">nb_lars</span><span class="p">])</span>
<span class="c1"># Same than usual using preloaded sgd functions</span>
<span class="n">sidx</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">while</span> <span class="n">sidx</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
<span class="n">eidx</span> <span class="o">=</span> <span class="n">sidx</span> <span class="o">+</span> <span class="nb">len</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">sidx</span><span class="o">+</span><span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">])</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">preloaded_multi_sgd_mom_update</span><span class="p">(</span>
<span class="o">*</span><span class="p">(</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">new_grads</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">new_states</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]))</span> <span class="o">+</span>
<span class="p">[</span><span class="n">new_lrs</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span> <span class="n">new_wds</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]]),</span>
<span class="n">out</span><span class="o">=</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]),</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">preloaded_multi_sgd_update</span><span class="p">(</span>
<span class="o">*</span><span class="p">(</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">new_grads</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]))</span> <span class="o">+</span>
<span class="p">[</span><span class="n">new_lrs</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span> <span class="n">new_wds</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]]),</span>
<span class="n">out</span><span class="o">=</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]),</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">preloaded_multi_mp_sgd_mom_update</span><span class="p">(</span>
<span class="o">*</span><span class="p">(</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">new_grads</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="o">*</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">new_states</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">])))</span> <span class="o">+</span>
<span class="p">[</span><span class="n">new_lrs</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span> <span class="n">new_wds</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]]),</span>
<span class="n">out</span><span class="o">=</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]),</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">preloaded_multi_mp_sgd_update</span><span class="p">(</span>
<span class="o">*</span><span class="p">(</span><span class="n">_flatten_list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">new_grads</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">new_states</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]))[</span><span class="mi">1</span><span class="p">]))</span> <span class="o">+</span>
<span class="p">[</span><span class="n">new_lrs</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span> <span class="n">new_wds</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]]),</span>
<span class="n">out</span><span class="o">=</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">num_weights</span><span class="o">=</span><span class="nb">len</span><span class="p">(</span><span class="n">new_weights</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]),</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">sidx</span> <span class="o">+=</span> <span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">lrs</span> <span class="o">=</span> <span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">_get_lars</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="p">)</span> <span class="k">for</span> <span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="p">)</span> <span class="ow">in</span>
<span class="nb">zip</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="p">)]</span>
<span class="k">for</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">lr</span><span class="p">,</span> <span class="n">wd</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">,</span> <span class="n">states</span><span class="p">,</span> <span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">sgd_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lazy_update</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lazy_update</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">mp_sgd_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mp_sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<div class="viewcode-block" id="LARS.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LARS.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span></div>
<div class="viewcode-block" id="LARS.update_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LARS.update_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">update_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span>
<span class="n">multi_precision</span><span class="o">=</span><span class="n">use_multi_precision</span><span class="p">)</span></div></div>
<span class="c1">#</span>
<div class="viewcode-block" id="LBSGD"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LBSGD">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">LBSGD</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The Large Batch SGD optimizer with momentum and weight decay.</span>
<span class="sd"> The optimizer updates the weight by::</span>
<span class="sd"> state = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight</span>
<span class="sd"> weight = weight - state</span>
<span class="sd"> For details of the update algorithm see :class:`~mxnet.ndarray.sgd_update`</span>
<span class="sd"> and :class:`~mxnet.ndarray.sgd_mom_update`.</span>
<span class="sd"> In addition to the SGD updates the LBSGD optimizer uses the LARS, Layer-wise</span>
<span class="sd"> Adaptive Rate Scaling, algorithm to have a separate learning rate for each</span>
<span class="sd"> layer of the network, which leads to better stability over large batch sizes.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> momentum : float, optional</span>
<span class="sd"> The momentum value.</span>
<span class="sd"> multi_precision: bool, optional</span>
<span class="sd"> Flag to control the internal precision of the optimizer.</span>
<span class="sd"> False: results in using the same precision as the weights (default),</span>
<span class="sd"> True: makes internal 32-bit copy of the weights and applies gradients</span>
<span class="sd"> in 32-bit precision even if actual weights used in the model have lower precision.</span>
<span class="sd"> Turning this on can improve convergence and accuracy when training with float16.</span>
<span class="sd"> warmup_strategy: string (&#39;linear&#39;, &#39;power2&#39;, &#39;sqrt&#39;. , &#39;lars&#39; default : &#39;linear&#39;)</span>
<span class="sd"> warmup_epochs: unsigned, default: 5</span>
<span class="sd"> batch_scale: unsigned, default: 1 (same as batch size * numworkers)</span>
<span class="sd"> updates_per_epoch: updates_per_epoch (default: 32, Default might not reflect true number batches per epoch. Used for warmup.)</span>
<span class="sd"> begin_epoch: unsigned, default 0, starting epoch.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">warmup_strategy</span><span class="o">=</span><span class="s1">&#39;linear&#39;</span><span class="p">,</span>
<span class="n">warmup_epochs</span><span class="o">=</span><span class="mi">5</span><span class="p">,</span> <span class="n">batch_scale</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">updates_per_epoch</span><span class="o">=</span><span class="mi">32</span><span class="p">,</span> <span class="n">begin_epoch</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">num_epochs</span><span class="o">=</span><span class="mi">60</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">LBSGD</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s1">&#39;Running Large-Batch SGD Algorithm&#39;</span><span class="p">)</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s1">&#39;(Batch_scale=</span><span class="si">%f</span><span class="s1">, warmup_epochs=</span><span class="si">%d</span><span class="s1">, warmup_strategy=</span><span class="si">%s</span><span class="s1">, updates_per_epoch=</span><span class="si">%d</span><span class="s1">)&#39;</span><span class="p">,</span>
<span class="n">batch_scale</span><span class="p">,</span> <span class="n">warmup_epochs</span><span class="p">,</span> <span class="n">warmup_strategy</span><span class="p">,</span> <span class="n">updates_per_epoch</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
<span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="o">=</span> <span class="n">multi_precision</span>
<span class="c1"># new user parameters for large batch</span>
<span class="bp">self</span><span class="o">.</span><span class="n">warmup_strategy</span> <span class="o">=</span> <span class="n">warmup_strategy</span>
<span class="bp">self</span><span class="o">.</span><span class="n">warmup_epochs</span> <span class="o">=</span> <span class="n">warmup_epochs</span>
<span class="bp">self</span><span class="o">.</span><span class="n">batch_scale</span> <span class="o">=</span> <span class="n">batch_scale</span>
<span class="bp">self</span><span class="o">.</span><span class="n">updates_per_epoch</span> <span class="o">=</span> <span class="n">updates_per_epoch</span>
<span class="bp">self</span><span class="o">.</span><span class="n">init_updates</span> <span class="o">=</span> <span class="n">begin_epoch</span> <span class="o">*</span> <span class="n">updates_per_epoch</span>
<span class="bp">self</span><span class="o">.</span><span class="n">num_epochs</span> <span class="o">=</span> <span class="n">num_epochs</span>
<span class="c1"># addl internal usage parameters and storage</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lbmult</span> <span class="o">=</span> <span class="mi">1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">cumgrads</span> <span class="o">=</span> <span class="p">{}</span>
<span class="c1"># for adaptive lr</span>
<span class="bp">self</span><span class="o">.</span><span class="n">adaptive</span> <span class="o">=</span> <span class="kc">False</span>
<span class="bp">self</span><span class="o">.</span><span class="n">admult</span> <span class="o">=</span> <span class="mi">1</span> <span class="c1"># adaptation constant</span>
<div class="viewcode-block" id="LBSGD.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LBSGD.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="n">array</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">ctx</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">!=</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span>
<span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">)</span>
<span class="k">return</span> <span class="p">(</span><span class="n">momentum</span><span class="p">,</span> <span class="n">weight_master_copy</span><span class="p">)</span>
<span class="k">if</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;Accumulating with float16 in optimizer can lead to &quot;</span>
<span class="s2">&quot;poor accuracy or slow convergence. &quot;</span>
<span class="s2">&quot;Consider using multi_precision=True option of the &quot;</span>
<span class="s2">&quot;SGD optimizer&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">!=</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">)</span>
<span class="k">return</span> <span class="n">momentum</span></div>
<span class="k">def</span> <span class="nf">_get_lbmult</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">nup</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Returns lr scaling factor for large batch according to warmup schedule</span>
<span class="sd"> (to be implemented)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">nwup</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">warmup_epochs</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">updates_per_epoch</span>
<span class="n">strategy</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">warmup_strategy</span>
<span class="n">maxmult</span> <span class="o">=</span> <span class="nb">float</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">batch_scale</span><span class="p">)</span>
<span class="k">if</span> <span class="n">nup</span> <span class="o">&gt;=</span> <span class="n">nwup</span><span class="p">:</span>
<span class="n">mult</span> <span class="o">=</span> <span class="n">maxmult</span>
<span class="k">elif</span> <span class="n">nwup</span> <span class="o">&lt;=</span> <span class="mi">1</span><span class="p">:</span>
<span class="n">mult</span> <span class="o">=</span> <span class="mf">1.0</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="p">(</span><span class="n">strategy</span> <span class="o">==</span> <span class="s1">&#39;linear&#39;</span><span class="p">):</span>
<span class="n">mult</span> <span class="o">=</span> <span class="mf">1.0</span> <span class="o">+</span> <span class="p">(</span><span class="n">maxmult</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="n">nup</span> <span class="o">/</span> <span class="n">nwup</span>
<span class="k">elif</span> <span class="p">(</span><span class="n">strategy</span> <span class="o">==</span> <span class="s1">&#39;power2&#39;</span><span class="p">):</span>
<span class="n">mult</span> <span class="o">=</span> <span class="mf">1.0</span> <span class="o">+</span> <span class="p">(</span><span class="n">maxmult</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="p">(</span><span class="n">nup</span><span class="o">*</span><span class="n">nup</span><span class="p">)</span><span class="o">/</span><span class="p">(</span><span class="n">nwup</span><span class="o">*</span><span class="n">nwup</span><span class="p">)</span>
<span class="k">elif</span> <span class="p">(</span><span class="n">strategy</span> <span class="o">==</span> <span class="s1">&#39;sqrt&#39;</span><span class="p">):</span>
<span class="n">mult</span> <span class="o">=</span> <span class="mf">1.0</span> <span class="o">+</span> <span class="p">(</span><span class="n">maxmult</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="n">math</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">nup</span><span class="p">)</span> <span class="o">/</span> <span class="n">nwup</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mult</span> <span class="o">=</span> <span class="mf">1.0</span>
<span class="k">return</span> <span class="n">mult</span>
<span class="k">def</span> <span class="nf">_get_lars</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">wd</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Returns a scaling factor for the learning rate for this layer</span>
<span class="sd"> default is 1</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">weight2</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_l2norm</span><span class="p">(</span><span class="n">weight</span><span class="p">)</span>
<span class="n">grad2</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_l2norm</span><span class="p">(</span><span class="n">g</span><span class="p">)</span>
<span class="n">lars</span> <span class="o">=</span> <span class="n">math</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="n">weight2</span> <span class="o">/</span> <span class="p">(</span><span class="n">grad2</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight2</span> <span class="o">+</span> <span class="mf">1e-18</span><span class="p">))</span>
<span class="k">if</span> <span class="n">lars</span> <span class="o">&lt;</span> <span class="mf">0.01</span><span class="p">:</span>
<span class="n">lars</span> <span class="o">=</span> <span class="mf">0.01</span>
<span class="k">elif</span> <span class="n">lars</span> <span class="o">&gt;</span> <span class="mi">100</span><span class="p">:</span>
<span class="n">lars</span> <span class="o">=</span> <span class="mi">100</span>
<span class="k">return</span> <span class="n">lars</span>
<span class="k">def</span> <span class="nf">_l2norm</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">v</span><span class="p">):</span>
<span class="s2">&quot;inner product implementation&quot;</span>
<span class="n">norm</span> <span class="o">=</span> <span class="n">multiply</span><span class="p">(</span><span class="n">v</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span><span class="o">.</span><span class="n">asnumpy</span><span class="p">()</span><span class="o">.</span><span class="n">sum</span><span class="p">()</span>
<span class="k">return</span> <span class="n">norm</span>
<span class="k">def</span> <span class="nf">_reset_cum_gradient</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
<span class="s2">&quot;called every macro-batch to reset cumulated gradients to 0 for a given index&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">cumgrads</span><span class="p">[</span><span class="n">index</span><span class="p">][</span><span class="s1">&#39;cum_grad&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">def</span> <span class="nf">_get_cum_gradient</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
<span class="s2">&quot;get the cumulated gradient for index&quot;</span>
<span class="k">if</span> <span class="n">index</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">cumgrads</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">cumgrads</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="p">{}</span>
<span class="k">def</span> <span class="nf">_put_cum_gradient</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">cgrad</span><span class="p">):</span>
<span class="s2">&quot;store cumulated gradient for index&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">cumgrads</span><span class="p">[</span><span class="n">index</span><span class="p">]</span> <span class="o">=</span> <span class="n">cgrad</span>
<span class="k">def</span> <span class="nf">_cumulate_gradient</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">index</span><span class="p">):</span>
<span class="s2">&quot;Cumulate gradients for large-batch emulation. Cumulated by index (layer)&quot;</span>
<span class="n">cgrad</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_cum_gradient</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="k">if</span> <span class="n">cgrad</span><span class="p">:</span>
<span class="n">num_cums</span> <span class="o">=</span> <span class="n">cgrad</span><span class="p">[</span><span class="s1">&#39;num_cums&#39;</span><span class="p">]</span>
<span class="k">if</span> <span class="n">num_cums</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">cum_grad</span> <span class="o">=</span> <span class="n">cgrad</span><span class="p">[</span><span class="s1">&#39;cum_grad&#39;</span><span class="p">]</span> <span class="o">+</span> <span class="n">grad</span>
<span class="n">num_cums</span> <span class="o">+=</span> <span class="mi">1</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">cum_grad</span> <span class="o">=</span> <span class="n">grad</span>
<span class="n">num_cums</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">init_updates</span> <span class="o">+</span> <span class="mi">1</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">cum_grad</span> <span class="o">=</span> <span class="n">grad</span>
<span class="n">num_cums</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">init_updates</span> <span class="o">+</span> <span class="mi">1</span>
<span class="n">cgrad</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;cum_grad&#39;</span><span class="p">:</span> <span class="n">cum_grad</span><span class="p">,</span> <span class="s1">&#39;num_cums&#39;</span><span class="p">:</span> <span class="n">num_cums</span><span class="p">}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_put_cum_gradient</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">cgrad</span><span class="p">)</span>
<span class="k">return</span> <span class="n">cgrad</span>
<div class="viewcode-block" id="LBSGD.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LBSGD.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span> <span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span> <span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="c1"># new stuff for large batch</span>
<span class="n">cgrad</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_cumulate_gradient</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">index</span><span class="p">)</span>
<span class="k">if</span> <span class="p">(</span><span class="n">cgrad</span><span class="p">[</span><span class="s1">&#39;num_cums&#39;</span><span class="p">]</span> <span class="o">%</span> <span class="bp">self</span><span class="o">.</span><span class="n">batch_scale</span><span class="p">)</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">cgrad</span><span class="p">[</span><span class="s1">&#39;cum_grad&#39;</span><span class="p">]</span> <span class="o">/</span> <span class="bp">self</span><span class="o">.</span><span class="n">batch_scale</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">warmup_strategy</span> <span class="o">==</span> <span class="s1">&#39;lars&#39;</span><span class="p">:</span>
<span class="n">lbmult</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lars</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">wd</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">lbmult</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lbmult</span><span class="p">(</span><span class="n">cgrad</span><span class="p">[</span><span class="s1">&#39;num_cums&#39;</span><span class="p">])</span>
<span class="n">lr</span> <span class="o">=</span> <span class="n">lr</span> <span class="o">*</span> <span class="n">lbmult</span>
<span class="c1"># do the regular sgd update flow</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;momentum&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">state</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">))</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">use_multi_precision</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">sgd_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">mp_sgd_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mp_sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="c1"># reset update count and cumulated gradient per large batch</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_reset_cum_gradient</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">lr</span> <span class="o">=</span> <span class="mf">0.0</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{}</span>
<span class="n">sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="LAMB"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LAMB">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">LAMB</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;LAMB Optimizer.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">beta1</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">beta2</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-6</span><span class="p">,</span>
<span class="n">lower_bound</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">upper_bound</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">bias_correction</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">LAMB</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">learning_rate</span><span class="o">=</span><span class="n">learning_rate</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">=</span> <span class="n">beta1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta2</span> <span class="o">=</span> <span class="n">beta2</span>
<span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span> <span class="o">=</span> <span class="n">lower_bound</span>
<span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span> <span class="o">=</span> <span class="n">upper_bound</span>
<span class="bp">self</span><span class="o">.</span><span class="n">bias_correction</span> <span class="o">=</span> <span class="n">bias_correction</span>
<span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span> <span class="o">=</span> <span class="nb">max</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="nb">min</span><span class="p">(</span><span class="mi">45</span><span class="p">,</span> <span class="nb">int</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s1">&#39;MXNET_OPTIMIZER_AGGREGATION_SIZE&#39;</span><span class="p">,</span> <span class="s2">&quot;45&quot;</span><span class="p">))))</span>
<div class="viewcode-block" id="LAMB.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LAMB.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">stype</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">stype</span>
<span class="n">dtype</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">stype</span><span class="p">),</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">stype</span><span class="p">))</span></div>
<span class="k">def</span> <span class="nf">_update_impl</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;beta1&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="p">,</span> <span class="s1">&#39;beta2&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta2</span><span class="p">,</span> <span class="s1">&#39;epsilon&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">,</span>
<span class="s1">&#39;bias_correction&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_correction</span><span class="p">,</span>
<span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span> <span class="o">&lt;=</span> <span class="mi">1</span> <span class="ow">or</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">index</span><span class="p">)</span> <span class="o">==</span> <span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">)</span>
<span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span> <span class="o">=</span> <span class="n">index</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">weight</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">grad</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">t</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="n">weight_ptr</span> <span class="o">=</span> <span class="n">weight</span>
<span class="n">grad_ptr</span> <span class="o">=</span> <span class="n">grad</span>
<span class="k">if</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="n">mean</span><span class="p">,</span> <span class="n">var</span> <span class="o">=</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="n">weight32</span> <span class="o">=</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mean</span><span class="p">,</span> <span class="n">var</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;t&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">t</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="n">g</span> <span class="o">=</span> <span class="n">mp_lamb_update_phase1</span><span class="p">(</span><span class="n">weight_ptr</span><span class="p">,</span> <span class="n">grad_ptr</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">var</span><span class="p">,</span> <span class="n">weight32</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;lower_bound&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;upper_bound&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span>
<span class="n">r_1</span> <span class="o">=</span> <span class="n">weight32</span><span class="o">.</span><span class="n">norm</span><span class="p">()</span>
<span class="n">r_2</span> <span class="o">=</span> <span class="n">g</span><span class="o">.</span><span class="n">norm</span><span class="p">()</span>
<span class="n">mp_lamb_update_phase2</span><span class="p">(</span><span class="n">weight_ptr</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">r_1</span><span class="p">,</span> <span class="n">r_2</span><span class="p">,</span> <span class="n">weight32</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight_ptr</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">g</span> <span class="o">=</span> <span class="n">lamb_update_phase1</span><span class="p">(</span><span class="n">weight_ptr</span><span class="p">,</span> <span class="n">grad_ptr</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">var</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;lower_bound&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;upper_bound&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span>
<span class="n">r_1</span> <span class="o">=</span> <span class="n">weight_ptr</span><span class="o">.</span><span class="n">norm</span><span class="p">()</span>
<span class="n">r_2</span> <span class="o">=</span> <span class="n">g</span><span class="o">.</span><span class="n">norm</span><span class="p">()</span>
<span class="n">lamb_update_phase2</span><span class="p">(</span><span class="n">weight_ptr</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">r_1</span><span class="p">,</span> <span class="n">r_2</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight_ptr</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;lower_bound&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">lower_bound</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;upper_bound&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">upper_bound</span>
<span class="n">step_count</span><span class="p">,</span> <span class="n">lrs</span><span class="p">,</span> <span class="n">wds</span> <span class="o">=</span> <span class="p">[],</span> <span class="p">[],</span> <span class="p">[]</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">w_i</span><span class="p">,</span> <span class="n">g_i</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">w_i</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">g_i</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">i</span><span class="p">)</span>
<span class="n">step_count</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
<span class="n">lrs</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">i</span><span class="p">))</span>
<span class="n">wds</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">i</span><span class="p">))</span>
<span class="n">updated_tensors</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">while</span> <span class="n">updated_tensors</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">weight</span><span class="p">):</span>
<span class="n">sidx</span> <span class="o">=</span> <span class="n">updated_tensors</span>
<span class="n">eidx</span> <span class="o">=</span> <span class="nb">min</span><span class="p">(</span><span class="n">updated_tensors</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">weight</span><span class="p">))</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="n">mean</span><span class="p">,</span> <span class="n">var</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">state</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]))</span>
<span class="n">multi_lamb_update</span><span class="p">(</span><span class="n">weight</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">grad</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">mean</span><span class="p">,</span> <span class="n">var</span><span class="p">,</span>
<span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">step_count</span><span class="o">=</span><span class="n">step_count</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">lrs</span><span class="o">=</span><span class="n">lrs</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">wds</span><span class="o">=</span><span class="n">wds</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mean_var</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">state</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]))[</span><span class="mi">1</span><span class="p">]</span>
<span class="n">temp</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">mean_var</span><span class="p">))</span>
<span class="n">mean</span> <span class="o">=</span> <span class="n">temp</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="n">var</span> <span class="o">=</span> <span class="n">temp</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="n">multi_mp_lamb_update</span><span class="p">(</span><span class="n">weight</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">grad</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">mean</span><span class="p">,</span> <span class="n">var</span><span class="p">,</span>
<span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">state</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">]))[</span><span class="mi">0</span><span class="p">],</span>
<span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">step_count</span><span class="o">=</span><span class="n">step_count</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">lrs</span><span class="o">=</span><span class="n">lrs</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="n">wds</span><span class="o">=</span><span class="n">wds</span><span class="p">[</span><span class="n">sidx</span><span class="p">:</span><span class="n">eidx</span><span class="p">],</span>
<span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="n">updated_tensors</span> <span class="o">+=</span> <span class="bp">self</span><span class="o">.</span><span class="n">aggregate_num</span>
<div class="viewcode-block" id="LAMB.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LAMB.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span></div>
<div class="viewcode-block" id="LAMB.update_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.LAMB.update_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">update_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span>
<span class="n">multi_precision</span><span class="o">=</span><span class="n">use_multi_precision</span><span class="p">)</span></div></div>
<span class="c1"># pylint: enable=line-too-long</span>
<div class="viewcode-block" id="DCASGD"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.DCASGD">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">DCASGD</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The DCASGD optimizer.</span>
<span class="sd"> This class implements the optimizer described in *Asynchronous Stochastic Gradient Descent</span>
<span class="sd"> with Delay Compensation for Distributed Deep Learning*,</span>
<span class="sd"> available at https://arxiv.org/abs/1609.08326.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> momentum : float, optional</span>
<span class="sd"> The momentum value.</span>
<span class="sd"> lamda : float, optional</span>
<span class="sd"> Scale DC value.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">lamda</span><span class="o">=</span><span class="mf">0.04</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">DCASGD</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
<span class="bp">self</span><span class="o">.</span><span class="n">weight_previous</span> <span class="o">=</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lamda</span> <span class="o">=</span> <span class="n">lamda</span>
<div class="viewcode-block" id="DCASGD.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.DCASGD.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">==</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span><span class="kc">None</span><span class="p">,</span>
<span class="n">weight</span><span class="o">.</span><span class="n">copy</span><span class="p">())</span> <span class="c1"># previous weight</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="c1"># momentum</span>
<span class="n">weight</span><span class="o">.</span><span class="n">copy</span><span class="p">())</span> <span class="c1"># previous weight</span></div>
<div class="viewcode-block" id="DCASGD.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.DCASGD.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">clip</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">)</span>
<span class="n">mom</span><span class="p">,</span> <span class="n">previous_weight</span> <span class="o">=</span> <span class="n">state</span>
<span class="k">if</span> <span class="n">mom</span><span class="p">:</span>
<span class="n">mom</span><span class="p">[:]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span>
<span class="n">mom</span><span class="p">[:]</span> <span class="o">+=</span> <span class="o">-</span><span class="n">lr</span> <span class="o">*</span> <span class="p">(</span><span class="n">grad</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">lamda</span> \
<span class="o">*</span> <span class="n">grad</span> <span class="o">*</span> <span class="n">grad</span> <span class="o">*</span> <span class="p">(</span><span class="n">weight</span> <span class="o">-</span> <span class="n">previous_weight</span><span class="p">))</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">assert</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">==</span> <span class="mf">0.0</span><span class="p">)</span>
<span class="n">mom</span> <span class="o">=</span> <span class="o">-</span><span class="n">lr</span> <span class="o">*</span> <span class="p">(</span><span class="n">grad</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">lamda</span> \
<span class="o">*</span> <span class="n">grad</span> <span class="o">*</span> <span class="n">grad</span> <span class="o">*</span> <span class="p">(</span><span class="n">weight</span> <span class="o">-</span> <span class="n">previous_weight</span><span class="p">))</span>
<span class="n">previous_weight</span><span class="p">[:]</span> <span class="o">=</span> <span class="n">weight</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">+=</span> <span class="n">mom</span></div></div>
<div class="viewcode-block" id="NAG"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.NAG">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">NAG</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Nesterov accelerated gradient.</span>
<span class="sd"> This optimizer updates each weight by::</span>
<span class="sd"> state = momentum * state + grad + wd * weight</span>
<span class="sd"> weight = weight - (lr * (grad + momentum * state))</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> momentum : float, optional</span>
<span class="sd"> The momentum value.</span>
<span class="sd"> multi_precision: bool, optional</span>
<span class="sd"> Flag to control the internal precision of the optimizer.</span>
<span class="sd"> False: results in using the same precision as the weights (default),</span>
<span class="sd"> True: makes internal 32-bit copy of the weights and applies gradients</span>
<span class="sd"> in 32-bit precision even if actual weights used in the model have lower precision.</span>
<span class="sd"> Turning this on can improve convergence and accuracy when training with float16.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">NAG</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
<div class="viewcode-block" id="NAG.create_state_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.NAG.create_state_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">create_state_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
<span class="n">weight_master_copy</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">numpy</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
<span class="k">return</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight_master_copy</span><span class="p">),</span> <span class="n">weight_master_copy</span><span class="p">)</span>
<span class="k">if</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;Accumulating with float16 in optimizer can lead to &quot;</span>
<span class="s2">&quot;poor accuracy or slow convergence. &quot;</span>
<span class="s2">&quot;Consider using multi_precision=True option of the &quot;</span>
<span class="s2">&quot;NAG optimizer&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">create_state</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span></div>
<div class="viewcode-block" id="NAG.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.NAG.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">!=</span> <span class="mf">0.0</span><span class="p">:</span>
<span class="n">momentum</span> <span class="o">=</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">)</span>
<span class="k">return</span> <span class="n">momentum</span></div>
<span class="k">def</span> <span class="nf">_update_impl</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;momentum&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">multi_precision</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">nag_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">mp_nag_mom_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">mp_sgd_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<div class="viewcode-block" id="NAG.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.NAG.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">multi_precision</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span></div>
<div class="viewcode-block" id="NAG.update_multi_precision"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.NAG.update_multi_precision">[docs]</a> <span class="k">def</span> <span class="nf">update_multi_precision</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="n">use_multi_precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">multi_precision</span> <span class="ow">and</span> <span class="n">weight</span><span class="o">.</span><span class="n">dtype</span> <span class="o">==</span> <span class="n">numpy</span><span class="o">.</span><span class="n">float16</span> \
<span class="ow">and</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">state</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_impl</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span>
<span class="n">multi_precision</span><span class="o">=</span><span class="n">use_multi_precision</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="SGLD"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGLD">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">SGLD</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Stochastic Gradient Riemannian Langevin Dynamics.</span>
<span class="sd"> This class implements the optimizer described in the paper *Stochastic Gradient</span>
<span class="sd"> Riemannian Langevin Dynamics on the Probability Simplex*, available at</span>
<span class="sd"> https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">SGLD</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<div class="viewcode-block" id="SGLD.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGLD.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="kc">None</span></div>
<div class="viewcode-block" id="SGLD.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.SGLD.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">clip</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">)</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">+=</span> <span class="o">-</span> <span class="n">lr</span><span class="o">/</span><span class="mi">2</span> <span class="o">*</span> <span class="p">(</span><span class="n">grad</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight</span><span class="p">)</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">+=</span> <span class="n">normal</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">math</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="n">lr</span><span class="p">),</span> <span class="n">shape</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span>
<span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">ctx</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="ccSGD"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.ccSGD">[docs]</a><span class="nd">@register</span> <span class="c1"># pylint: disable=invalid-name</span>
<span class="k">class</span> <span class="nc">ccSGD</span><span class="p">(</span><span class="n">SGD</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;[DEPRECATED] Same as `SGD`. Left here for backward compatibility.&quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">ccSGD</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span></div>
<div class="viewcode-block" id="Adam"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Adam">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">Adam</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The Adam optimizer.</span>
<span class="sd"> This class implements the optimizer described in *Adam: A Method for</span>
<span class="sd"> Stochastic Optimization*, available at http://arxiv.org/abs/1412.6980.</span>
<span class="sd"> If the storage types of grad is ``row_sparse``, and ``lazy_update`` is True, \</span>
<span class="sd"> **lazy updates** at step t are applied by::</span>
<span class="sd"> for row in grad.indices:</span>
<span class="sd"> rescaled_grad[row] = clip(grad[row] * rescale_grad + wd * weight[row], clip_gradient)</span>
<span class="sd"> m[row] = beta1 * m[row] + (1 - beta1) * rescaled_grad[row]</span>
<span class="sd"> v[row] = beta2 * v[row] + (1 - beta2) * (rescaled_grad[row]**2)</span>
<span class="sd"> lr = learning_rate * sqrt(1 - beta1**t) / (1 - beta2**t)</span>
<span class="sd"> w[row] = w[row] - lr * m[row] / (sqrt(v[row]) + epsilon)</span>
<span class="sd"> The lazy update only updates the mean and var for the weights whose row_sparse</span>
<span class="sd"> gradient indices appear in the current batch, rather than updating it for all indices.</span>
<span class="sd"> Compared with the original update, it can provide large improvements in model training</span>
<span class="sd"> throughput for some applications. However, it provides slightly different semantics than</span>
<span class="sd"> the original update, and may lead to different empirical results.</span>
<span class="sd"> Otherwise, **standard updates** at step t are applied by::</span>
<span class="sd"> rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)</span>
<span class="sd"> m = beta1 * m + (1 - beta1) * rescaled_grad</span>
<span class="sd"> v = beta2 * v + (1 - beta2) * (rescaled_grad**2)</span>
<span class="sd"> lr = learning_rate * sqrt(1 - beta1**t) / (1 - beta2**t)</span>
<span class="sd"> w = w - lr * m / (sqrt(v) + epsilon)</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> For details of the update algorithm, see :class:`~mxnet.ndarray.adam_update`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> beta1 : float, optional</span>
<span class="sd"> Exponential decay rate for the first moment estimates.</span>
<span class="sd"> beta2 : float, optional</span>
<span class="sd"> Exponential decay rate for the second moment estimates.</span>
<span class="sd"> epsilon : float, optional</span>
<span class="sd"> Small value to avoid division by 0.</span>
<span class="sd"> lazy_update : bool, optional</span>
<span class="sd"> Default is True. If True, lazy updates are applied \</span>
<span class="sd"> if the storage types of weight and grad are both ``row_sparse``.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">beta1</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">beta2</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-8</span><span class="p">,</span>
<span class="n">lazy_update</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">Adam</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">learning_rate</span><span class="o">=</span><span class="n">learning_rate</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">=</span> <span class="n">beta1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta2</span> <span class="o">=</span> <span class="n">beta2</span>
<span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span> <span class="o">=</span> <span class="n">lazy_update</span>
<div class="viewcode-block" id="Adam.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Adam.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="n">stype</span> <span class="o">=</span> <span class="n">weight</span><span class="o">.</span><span class="n">stype</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span> <span class="k">else</span> <span class="s1">&#39;default&#39;</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span>
<span class="n">stype</span><span class="o">=</span><span class="n">stype</span><span class="p">),</span> <span class="c1"># mean</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span>
<span class="n">stype</span><span class="o">=</span><span class="n">stype</span><span class="p">))</span> <span class="c1"># variance</span></div>
<div class="viewcode-block" id="Adam.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Adam.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">t</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="n">coef1</span> <span class="o">=</span> <span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="o">**</span><span class="n">t</span>
<span class="n">coef2</span> <span class="o">=</span> <span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta2</span><span class="o">**</span><span class="n">t</span>
<span class="n">lr</span> <span class="o">*=</span> <span class="n">math</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="n">coef2</span><span class="p">)</span><span class="o">/</span><span class="n">coef1</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;beta1&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="p">,</span> <span class="s1">&#39;beta2&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta2</span><span class="p">,</span> <span class="s1">&#39;epsilon&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">,</span>
<span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="n">mean</span><span class="p">,</span> <span class="n">var</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">adam_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">var</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lazy_update</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">lazy_update</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="AdaGrad"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.AdaGrad">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">AdaGrad</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;AdaGrad optimizer.</span>
<span class="sd"> This class implements the AdaGrad optimizer described in *Adaptive Subgradient</span>
<span class="sd"> Methods for Online Learning and Stochastic Optimization*, and available at</span>
<span class="sd"> http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.</span>
<span class="sd"> This optimizer updates each weight by::</span>
<span class="sd"> grad = clip(grad * rescale_grad, clip_gradient)</span>
<span class="sd"> history += square(grad)</span>
<span class="sd"> div = grad / sqrt(history + float_stable_eps)</span>
<span class="sd"> weight += (div + weight * wd) * -lr</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> See Also</span>
<span class="sd"> ----------</span>
<span class="sd"> :meth:`mxnet.ndarray.sparse.adagrad_update`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> eps: float, optional</span>
<span class="sd"> Initial value of the history accumulator. Avoids division by 0.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="mf">1e-7</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">AdaGrad</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">float_stable_eps</span> <span class="o">=</span> <span class="n">eps</span>
<div class="viewcode-block" id="AdaGrad.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.AdaGrad.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">)</span> <span class="c1"># history</span></div>
<div class="viewcode-block" id="AdaGrad.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.AdaGrad.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">is_sparse</span> <span class="o">=</span> <span class="n">grad</span><span class="o">.</span><span class="n">stype</span> <span class="o">==</span> <span class="s1">&#39;row_sparse&#39;</span>
<span class="n">history</span> <span class="o">=</span> <span class="n">state</span>
<span class="k">if</span> <span class="n">is_sparse</span><span class="p">:</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;epsilon&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">float_stable_eps</span><span class="p">,</span>
<span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="n">sparse</span><span class="o">.</span><span class="n">adagrad_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">history</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">clip</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">)</span>
<span class="n">history</span><span class="p">[:]</span> <span class="o">+=</span> <span class="n">square</span><span class="p">(</span><span class="n">grad</span><span class="p">)</span>
<span class="n">div</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">/</span> <span class="n">sqrt</span><span class="p">(</span><span class="n">history</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">float_stable_eps</span><span class="p">)</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">+=</span> <span class="p">(</span><span class="n">div</span> <span class="o">+</span> <span class="n">weight</span> <span class="o">*</span> <span class="n">wd</span><span class="p">)</span> <span class="o">*</span> <span class="o">-</span><span class="n">lr</span></div></div>
<div class="viewcode-block" id="RMSProp"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.RMSProp">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">RMSProp</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The RMSProp optimizer.</span>
<span class="sd"> Two versions of RMSProp are implemented:</span>
<span class="sd"> If ``centered=False``, we follow</span>
<span class="sd"> http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by</span>
<span class="sd"> Tieleman &amp; Hinton, 2012.</span>
<span class="sd"> For details of the update algorithm see :class:`~mxnet.ndarray.rmsprop_update`.</span>
<span class="sd"> If ``centered=True``, we follow http://arxiv.org/pdf/1308.0850v5.pdf (38)-(45)</span>
<span class="sd"> by Alex Graves, 2013.</span>
<span class="sd"> For details of the update algorithm see :class:`~mxnet.ndarray.rmspropalex_update`.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> gamma1: float, optional</span>
<span class="sd"> A decay factor of moving average over past squared gradient.</span>
<span class="sd"> gamma2: float, optional</span>
<span class="sd"> A &quot;momentum&quot; factor. Only used if `centered`=``True``.</span>
<span class="sd"> epsilon : float, optional</span>
<span class="sd"> Small value to avoid division by 0.</span>
<span class="sd"> centered : bool, optional</span>
<span class="sd"> Flag to control which version of RMSProp to use.::</span>
<span class="sd"> True: will use Graves&#39;s version of `RMSProp`,</span>
<span class="sd"> False: will use Tieleman &amp; Hinton&#39;s version of `RMSProp`.</span>
<span class="sd"> clip_weights : float, optional</span>
<span class="sd"> Clips weights into range ``[-clip_weights, clip_weights]``.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">gamma1</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">gamma2</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span>
<span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-8</span><span class="p">,</span> <span class="n">centered</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">clip_weights</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">RMSProp</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">learning_rate</span><span class="o">=</span><span class="n">learning_rate</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">gamma1</span> <span class="o">=</span> <span class="n">gamma1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">gamma2</span> <span class="o">=</span> <span class="n">gamma2</span>
<span class="bp">self</span><span class="o">.</span><span class="n">centered</span> <span class="o">=</span> <span class="n">centered</span>
<span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
<span class="bp">self</span><span class="o">.</span><span class="n">clip_weights</span> <span class="o">=</span> <span class="n">clip_weights</span>
<div class="viewcode-block" id="RMSProp.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.RMSProp.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">centered</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">),</span> <span class="c1"># n</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">),</span> <span class="c1"># g</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">))</span> <span class="c1"># delta</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">),)</span> <span class="c1"># n</span></div>
<div class="viewcode-block" id="RMSProp.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.RMSProp.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;gamma1&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma1</span><span class="p">,</span> <span class="s1">&#39;epsilon&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">,</span>
<span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">centered</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;gamma2&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma2</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_weights</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_weights&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_weights</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">centered</span><span class="p">:</span>
<span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="p">)</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">rmsprop_update</span><span class="p">(</span>
<span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span> <span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">n</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">delta</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">rmspropalex_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="n">delta</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="AdaDelta"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.AdaDelta">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">AdaDelta</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The AdaDelta optimizer.</span>
<span class="sd"> This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive</span>
<span class="sd"> learning rate method*, available at https://arxiv.org/abs/1212.5701.</span>
<span class="sd"> This optimizer updates each weight by::</span>
<span class="sd"> grad = clip(grad * rescale_grad + wd * weight, clip_gradient)</span>
<span class="sd"> acc_grad = rho * acc_grad + (1. - rho) * grad * grad</span>
<span class="sd"> delta = sqrt(acc_delta + epsilon) / sqrt(acc_grad + epsilon) * grad</span>
<span class="sd"> acc_delta = rho * acc_delta + (1. - rho) * delta * delta</span>
<span class="sd"> weight -= (delta + wd * weight)</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> rho: float</span>
<span class="sd"> Decay rate for both squared gradients and delta.</span>
<span class="sd"> epsilon : float</span>
<span class="sd"> Small value to avoid division by 0.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rho</span><span class="o">=</span><span class="mf">0.90</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">AdaDelta</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">rho</span> <span class="o">=</span> <span class="n">rho</span>
<span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
<div class="viewcode-block" id="AdaDelta.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.AdaDelta.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">),</span> <span class="c1"># accumulated g</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">))</span> <span class="c1"># accumulated delta</span></div>
<div class="viewcode-block" id="AdaDelta.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.AdaDelta.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="c1"># preprocess grad</span>
<span class="n">grad</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">clip</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">)</span>
<span class="c1"># accumulated g and delta initlization</span>
<span class="n">acc_g</span><span class="p">,</span> <span class="n">acc_delta</span> <span class="o">=</span> <span class="n">state</span>
<span class="c1"># update g, delta</span>
<span class="n">acc_g</span><span class="p">[:]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rho</span>
<span class="n">acc_g</span><span class="p">[:]</span> <span class="o">+=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">rho</span><span class="p">)</span> <span class="o">*</span> <span class="n">grad</span> <span class="o">*</span> <span class="n">grad</span>
<span class="n">current_delta</span> <span class="o">=</span> <span class="n">sqrt</span><span class="p">(</span><span class="n">acc_delta</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">)</span> <span class="o">/</span> <span class="n">sqrt</span><span class="p">(</span><span class="n">acc_g</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">)</span> <span class="o">*</span> <span class="n">grad</span>
<span class="n">acc_delta</span><span class="p">[:]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rho</span>
<span class="n">acc_delta</span><span class="p">[:]</span> <span class="o">+=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">rho</span><span class="p">)</span> <span class="o">*</span> <span class="n">current_delta</span> <span class="o">*</span> <span class="n">current_delta</span>
<span class="c1"># update weight</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">-=</span> <span class="n">current_delta</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight</span></div></div>
<span class="c1">#pylint: disable=invalid-name</span>
<span class="c1">#pylint: disable=line-too-long</span>
<div class="viewcode-block" id="Ftrl"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Ftrl">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">Ftrl</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The Ftrl optimizer.</span>
<span class="sd"> Referenced from *Ad Click Prediction: a View from the Trenches*, available at</span>
<span class="sd"> http://dl.acm.org/citation.cfm?id=2488200.</span>
<span class="sd"> eta :</span>
<span class="sd"> .. math::</span>
<span class="sd"> \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}</span>
<span class="sd"> The optimizer updates the weight by::</span>
<span class="sd"> rescaled_grad = clip(grad * rescale_grad, clip_gradient)</span>
<span class="sd"> z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate</span>
<span class="sd"> n += rescaled_grad**2</span>
<span class="sd"> w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) &gt; lamda1)</span>
<span class="sd"> If the storage types of weight, state and grad are all ``row_sparse``, \</span>
<span class="sd"> **sparse updates** are applied by::</span>
<span class="sd"> for row in grad.indices:</span>
<span class="sd"> rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient)</span>
<span class="sd"> z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate</span>
<span class="sd"> n[row] += rescaled_grad[row]**2</span>
<span class="sd"> w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) &gt; lamda1)</span>
<span class="sd"> The sparse update only updates the z and n for the weights whose row_sparse</span>
<span class="sd"> gradient indices appear in the current batch, rather than updating it for all</span>
<span class="sd"> indices. Compared with the original update, it can provide large</span>
<span class="sd"> improvements in model training throughput for some applications. However, it</span>
<span class="sd"> provides slightly different semantics than the original update, and</span>
<span class="sd"> may lead to different empirical results.</span>
<span class="sd"> For details of the update algorithm, see :class:`~mxnet.ndarray.ftrl_update`.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> lamda1 : float, optional</span>
<span class="sd"> L1 regularization coefficient.</span>
<span class="sd"> learning_rate : float, optional</span>
<span class="sd"> The initial learning rate.</span>
<span class="sd"> beta : float, optional</span>
<span class="sd"> Per-coordinate learning rate correlation parameter.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">lamda1</span><span class="o">=</span><span class="mf">0.01</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.1</span><span class="p">,</span> <span class="n">beta</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">Ftrl</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lamda1</span> <span class="o">=</span> <span class="n">lamda1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta</span> <span class="o">=</span> <span class="n">beta</span>
<span class="bp">self</span><span class="o">.</span><span class="n">lr</span> <span class="o">=</span> <span class="n">learning_rate</span>
<div class="viewcode-block" id="Ftrl.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Ftrl.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">),</span> <span class="c1"># z</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">stype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">stype</span><span class="p">))</span> <span class="c1"># n</span></div>
<div class="viewcode-block" id="Ftrl.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Ftrl.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">kwargs</span> <span class="o">=</span> <span class="p">{</span><span class="s1">&#39;lamda1&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">lamda1</span><span class="p">,</span> <span class="s1">&#39;beta&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="s1">&#39;rescale_grad&#39;</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span><span class="p">}</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">:</span>
<span class="n">kwargs</span><span class="p">[</span><span class="s1">&#39;clip_gradient&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span>
<span class="c1"># accumulated g and delta initialization</span>
<span class="n">z</span><span class="p">,</span> <span class="n">n</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">ftrl_update</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">z</span><span class="p">,</span> <span class="n">n</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">weight</span><span class="p">,</span>
<span class="n">lr</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span> <span class="n">wd</span><span class="o">=</span><span class="n">wd</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span></div></div>
<span class="c1"># pylint: enable=line-too-long</span>
<div class="viewcode-block" id="Adamax"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Adamax">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">Adamax</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The AdaMax optimizer.</span>
<span class="sd"> It is a variant of Adam based on the infinity norm</span>
<span class="sd"> available at http://arxiv.org/abs/1412.6980 Section 7.</span>
<span class="sd"> The optimizer updates the weight by::</span>
<span class="sd"> grad = clip(grad * rescale_grad + wd * weight, clip_gradient)</span>
<span class="sd"> m = beta1 * m_t + (1 - beta1) * grad</span>
<span class="sd"> u = maximum(beta2 * u, abs(grad))</span>
<span class="sd"> weight -= lr / (1 - beta1**t) * m / u</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> beta1 : float, optional</span>
<span class="sd"> Exponential decay rate for the first moment estimates.</span>
<span class="sd"> beta2 : float, optional</span>
<span class="sd"> Exponential decay rate for the second moment estimates.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.002</span><span class="p">,</span> <span class="n">beta1</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">beta2</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">Adamax</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">learning_rate</span><span class="o">=</span><span class="n">learning_rate</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">=</span> <span class="n">beta1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta2</span> <span class="o">=</span> <span class="n">beta2</span>
<div class="viewcode-block" id="Adamax.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Adamax.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="c1"># mean</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">))</span> <span class="c1"># variance</span></div>
<div class="viewcode-block" id="Adamax.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Adamax.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">t</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="n">lr</span> <span class="o">/=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="o">**</span><span class="n">t</span><span class="p">)</span>
<span class="c1"># preprocess grad</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">clip</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">)</span>
<span class="c1"># update m_t and u_t</span>
<span class="n">m_t</span><span class="p">,</span> <span class="n">u_t</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">m_t</span><span class="p">[:]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span>
<span class="n">m_t</span><span class="p">[:]</span> <span class="o">+=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="p">)</span> <span class="o">*</span> <span class="n">grad</span>
<span class="n">u_t</span><span class="p">[:]</span> <span class="o">=</span> <span class="n">maximum</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">beta2</span> <span class="o">*</span> <span class="n">u_t</span><span class="p">,</span> <span class="n">NDabs</span><span class="p">(</span><span class="n">grad</span><span class="p">))</span>
<span class="c1"># update weight</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">-=</span> <span class="n">lr</span> <span class="o">*</span> <span class="n">m_t</span> <span class="o">/</span> <span class="n">u_t</span></div></div>
<div class="viewcode-block" id="Nadam"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Nadam">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">Nadam</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The Nesterov Adam optimizer.</span>
<span class="sd"> Much like Adam is essentially RMSprop with momentum,</span>
<span class="sd"> Nadam is Adam RMSprop with Nesterov momentum available</span>
<span class="sd"> at http://cs229.stanford.edu/proj2015/054_report.pdf.</span>
<span class="sd"> This optimizer accepts the following parameters in addition to those accepted</span>
<span class="sd"> by :class:`.Optimizer`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> beta1 : float, optional</span>
<span class="sd"> Exponential decay rate for the first moment estimates.</span>
<span class="sd"> beta2 : float, optional</span>
<span class="sd"> Exponential decay rate for the second moment estimates.</span>
<span class="sd"> epsilon : float, optional</span>
<span class="sd"> Small value to avoid division by 0.</span>
<span class="sd"> schedule_decay : float, optional</span>
<span class="sd"> Exponential decay rate for the momentum schedule</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">learning_rate</span><span class="o">=</span><span class="mf">0.001</span><span class="p">,</span> <span class="n">beta1</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">beta2</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-8</span><span class="p">,</span>
<span class="n">schedule_decay</span><span class="o">=</span><span class="mf">0.004</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">Nadam</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">learning_rate</span><span class="o">=</span><span class="n">learning_rate</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">=</span> <span class="n">beta1</span>
<span class="bp">self</span><span class="o">.</span><span class="n">beta2</span> <span class="o">=</span> <span class="n">beta2</span>
<span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
<span class="bp">self</span><span class="o">.</span><span class="n">schedule_decay</span> <span class="o">=</span> <span class="n">schedule_decay</span>
<span class="bp">self</span><span class="o">.</span><span class="n">m_schedule</span> <span class="o">=</span> <span class="mf">1.</span>
<div class="viewcode-block" id="Nadam.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Nadam.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="k">return</span> <span class="p">(</span><span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">),</span> <span class="c1"># mean</span>
<span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">weight</span><span class="o">.</span><span class="n">dtype</span><span class="p">))</span> <span class="c1"># variance</span></div>
<div class="viewcode-block" id="Nadam.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Nadam.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="k">assert</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_update_count</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_lr</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">wd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_wd</span><span class="p">(</span><span class="n">index</span><span class="p">)</span>
<span class="n">t</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_index_update_count</span><span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="c1"># preprocess grad</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span> <span class="o">+</span> <span class="n">wd</span> <span class="o">*</span> <span class="n">weight</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">grad</span> <span class="o">=</span> <span class="n">clip</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">clip_gradient</span><span class="p">)</span>
<span class="c1"># warming momentum schedule</span>
<span class="n">momentum_t</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">*</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="mf">0.5</span> <span class="o">*</span> <span class="p">(</span><span class="nb">pow</span><span class="p">(</span><span class="mf">0.96</span><span class="p">,</span> <span class="n">t</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">schedule_decay</span><span class="p">)))</span>
<span class="n">momentum_t_1</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span> <span class="o">*</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="mf">0.5</span> <span class="o">*</span> <span class="p">(</span><span class="nb">pow</span><span class="p">(</span><span class="mf">0.96</span><span class="p">,</span> <span class="p">(</span><span class="n">t</span> <span class="o">+</span> <span class="mi">1</span><span class="p">)</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">schedule_decay</span><span class="p">)))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">m_schedule</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">m_schedule</span> <span class="o">*</span> <span class="n">momentum_t</span>
<span class="n">m_schedule_next</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">m_schedule</span> <span class="o">*</span> <span class="n">momentum_t_1</span>
<span class="c1"># update m_t and v_t</span>
<span class="n">m_t</span><span class="p">,</span> <span class="n">v_t</span> <span class="o">=</span> <span class="n">state</span>
<span class="n">m_t</span><span class="p">[:]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span>
<span class="n">m_t</span><span class="p">[:]</span> <span class="o">+=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta1</span><span class="p">)</span> <span class="o">*</span> <span class="n">grad</span>
<span class="n">v_t</span><span class="p">[:]</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta2</span>
<span class="n">v_t</span><span class="p">[:]</span> <span class="o">+=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta2</span><span class="p">)</span> <span class="o">*</span> <span class="n">grad</span> <span class="o">*</span> <span class="n">grad</span>
<span class="n">grad_prime</span> <span class="o">=</span> <span class="n">grad</span> <span class="o">/</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">m_schedule</span><span class="p">)</span>
<span class="n">m_t_prime</span> <span class="o">=</span> <span class="n">m_t</span> <span class="o">/</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="n">m_schedule_next</span><span class="p">)</span>
<span class="n">v_t_prime</span> <span class="o">=</span> <span class="n">v_t</span> <span class="o">/</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="nb">pow</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">beta2</span><span class="p">,</span> <span class="n">t</span><span class="p">))</span>
<span class="n">m_t_bar</span> <span class="o">=</span> <span class="p">(</span><span class="mf">1.</span> <span class="o">-</span> <span class="n">momentum_t</span><span class="p">)</span> <span class="o">*</span> <span class="n">grad_prime</span> <span class="o">+</span> <span class="n">momentum_t_1</span> <span class="o">*</span> <span class="n">m_t_prime</span>
<span class="c1"># update weight</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">-=</span> <span class="n">lr</span> <span class="o">*</span> <span class="n">m_t_bar</span> <span class="o">/</span> <span class="p">(</span><span class="n">sqrt</span><span class="p">(</span><span class="n">v_t_prime</span><span class="p">)</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="Test"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Test">[docs]</a><span class="nd">@register</span>
<span class="k">class</span> <span class="nc">Test</span><span class="p">(</span><span class="n">Optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;The Test optimizer&quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
<span class="nb">super</span><span class="p">(</span><span class="n">Test</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
<div class="viewcode-block" id="Test.create_state"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Test.create_state">[docs]</a> <span class="k">def</span> <span class="nf">create_state</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Creates a state to duplicate weight.&quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">zeros</span><span class="p">(</span><span class="n">weight</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">weight</span><span class="o">.</span><span class="n">context</span><span class="p">)</span></div>
<div class="viewcode-block" id="Test.update"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Test.update">[docs]</a> <span class="k">def</span> <span class="nf">update</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">weight</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">state</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Performs w += rescale_grad * grad.&quot;&quot;&quot;</span>
<span class="n">weight</span><span class="p">[:]</span> <span class="o">+=</span> <span class="n">grad</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rescale_grad</span>
<span class="n">state</span><span class="p">[:]</span> <span class="o">=</span> <span class="n">weight</span></div></div>
<span class="c1"># backward compatibility wrapper for Optimizer.CreateOptimizer</span>
<span class="n">create</span> <span class="o">=</span> <span class="n">Optimizer</span><span class="o">.</span><span class="n">create_optimizer</span> <span class="c1"># pylint: disable=invalid-name</span>
<span class="k">def</span> <span class="nf">_as_classic</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">allow_np</span><span class="p">):</span>
<span class="c1"># TODO(junwu): This is a temp solution for allowing converting</span>
<span class="c1"># np.ndarray to mx.nd.NDArray to be fed into the optimizer since</span>
<span class="c1"># users may have custom optimizers implemented using mx.nd.NDArray ops.</span>
<span class="kn">from</span> <span class="nn">..numpy</span> <span class="kn">import</span> <span class="n">ndarray</span> <span class="k">as</span> <span class="n">np_ndarray</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">np_ndarray</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">a</span><span class="p">):</span>
<span class="k">if</span> <span class="n">allow_np</span><span class="p">:</span>
<span class="k">return</span> <span class="p">[</span><span class="n">x</span><span class="o">.</span><span class="n">as_nd_ndarray</span><span class="p">()</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">a</span><span class="p">]</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;Converting np.ndarray to mx.nd.NDArray is not allowed&#39;</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">np_ndarray</span><span class="p">):</span>
<span class="k">if</span> <span class="n">allow_np</span><span class="p">:</span>
<span class="k">return</span> <span class="n">a</span><span class="o">.</span><span class="n">as_nd_ndarray</span><span class="p">()</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;Converting np.ndarray to mx.nd.NDArray is not allowed&#39;</span><span class="p">)</span>
<span class="k">return</span> <span class="n">a</span>
<div class="viewcode-block" id="Updater"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Updater">[docs]</a><span class="k">class</span> <span class="nc">Updater</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Updater for kvstore.&quot;&quot;&quot;</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">optimizer</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span> <span class="o">=</span> <span class="n">optimizer</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states</span> <span class="o">=</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states_synced</span> <span class="o">=</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">aggregate_updates</span> <span class="o">=</span> <span class="n">optimizer</span><span class="o">.</span><span class="n">aggregate_num</span> <span class="o">&gt;</span> <span class="mi">0</span>
<span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">index</span><span class="p">,</span> <span class="n">grad</span><span class="p">,</span> <span class="n">weight</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Updates weight given gradient and index.&quot;&quot;&quot;</span>
<span class="n">allow_np</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">allow_np_array</span> <span class="k">if</span> <span class="nb">hasattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="p">,</span> <span class="s2">&quot;allow_np_array&quot;</span><span class="p">)</span> <span class="k">else</span> <span class="n">is_np_array</span><span class="p">()</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">index</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span>
<span class="n">indices</span> <span class="o">=</span> <span class="p">[</span><span class="n">index</span><span class="p">]</span>
<span class="n">grads</span> <span class="o">=</span> <span class="p">[</span><span class="n">_as_classic</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">allow_np</span><span class="p">)]</span>
<span class="n">weights</span> <span class="o">=</span> <span class="p">[</span><span class="n">_as_classic</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">allow_np</span><span class="p">)]</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">indices</span> <span class="o">=</span> <span class="n">index</span>
<span class="n">grads</span> <span class="o">=</span> <span class="n">_as_classic</span><span class="p">(</span><span class="n">grad</span><span class="p">,</span> <span class="n">allow_np</span><span class="p">)</span>
<span class="n">weights</span> <span class="o">=</span> <span class="n">_as_classic</span><span class="p">(</span><span class="n">weight</span><span class="p">,</span> <span class="n">allow_np</span><span class="p">)</span>
<span class="k">if</span> <span class="n">weights</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">_set_current_context</span><span class="p">(</span><span class="n">weights</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">context</span><span class="o">.</span><span class="n">device_id</span><span class="p">)</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">idx</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
<span class="c1"># convert ctypes.char_p.value back to python str if needed</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">idx</span><span class="p">,</span> <span class="nb">bytes</span><span class="p">):</span>
<span class="n">indices</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">py_str</span><span class="p">(</span><span class="n">idx</span><span class="p">)</span>
<span class="n">idx</span> <span class="o">=</span> <span class="n">indices</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
<span class="k">if</span> <span class="n">idx</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">[</span><span class="n">idx</span><span class="p">]</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">create_state_multi_precision</span><span class="p">(</span><span class="n">idx</span><span class="p">,</span> <span class="n">weights</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states_synced</span><span class="p">[</span><span class="n">idx</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>
<span class="k">elif</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">states_synced</span><span class="p">[</span><span class="n">idx</span><span class="p">]:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">[</span><span class="n">idx</span><span class="p">]</span> <span class="o">=</span> \
<span class="bp">self</span><span class="o">.</span><span class="n">sync_state_context</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">[</span><span class="n">idx</span><span class="p">],</span> <span class="n">weights</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">context</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states_synced</span><span class="p">[</span><span class="n">idx</span><span class="p">]</span> <span class="o">=</span> <span class="kc">True</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">aggregate_updates</span><span class="p">:</span>
<span class="c1"># segregate values based on type</span>
<span class="n">type_map</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">):</span>
<span class="k">if</span> <span class="n">w</span><span class="o">.</span><span class="n">dtype</span> <span class="ow">in</span> <span class="n">type_map</span><span class="p">:</span>
<span class="n">type_map</span><span class="p">[</span><span class="n">w</span><span class="o">.</span><span class="n">dtype</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">((</span><span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span><span class="p">))</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">type_map</span><span class="p">[</span><span class="n">w</span><span class="o">.</span><span class="n">dtype</span><span class="p">]</span> <span class="o">=</span> <span class="p">[(</span><span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span><span class="p">)]</span>
<span class="k">for</span> <span class="n">idx</span> <span class="ow">in</span> <span class="n">type_map</span><span class="p">:</span>
<span class="n">current_index</span> <span class="o">=</span> <span class="mi">0</span>
<span class="n">indices</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">grads</span> <span class="o">=</span> <span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">type_map</span><span class="p">[</span><span class="n">idx</span><span class="p">])</span>
<span class="k">while</span> <span class="n">current_index</span> <span class="o">&lt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices</span><span class="p">):</span>
<span class="n">states</span> <span class="o">=</span> <span class="p">[]</span>
<span class="n">step</span> <span class="o">=</span> <span class="nb">min</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">indices</span><span class="p">)</span> <span class="o">-</span> <span class="n">current_index</span><span class="p">)</span>
<span class="k">for</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">step</span><span class="p">):</span>
<span class="n">states</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">[</span><span class="n">indices</span><span class="p">[</span><span class="n">current_index</span> <span class="o">+</span> <span class="n">j</span><span class="p">]])</span>
<span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">update_multi_precision</span><span class="p">(</span>
<span class="n">indices</span><span class="p">[</span><span class="n">current_index</span><span class="p">:</span><span class="n">current_index</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">],</span>
<span class="n">weights</span><span class="p">[</span><span class="n">current_index</span><span class="p">:</span><span class="n">current_index</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">],</span>
<span class="n">grads</span><span class="p">[</span><span class="n">current_index</span><span class="p">:</span><span class="n">current_index</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">aggregate_num</span><span class="p">],</span>
<span class="n">states</span><span class="p">)</span>
<span class="n">current_index</span> <span class="o">+=</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">aggregate_num</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span> <span class="ow">in</span> <span class="nb">zip</span><span class="p">(</span><span class="n">indices</span><span class="p">,</span> <span class="n">weights</span><span class="p">,</span> <span class="n">grads</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="o">.</span><span class="n">update_multi_precision</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">g</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">[</span><span class="n">i</span><span class="p">])</span>
<div class="viewcode-block" id="Updater.sync_state_context"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Updater.sync_state_context">[docs]</a> <span class="k">def</span> <span class="nf">sync_state_context</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">state</span><span class="p">,</span> <span class="n">context</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;sync state context.&quot;&quot;&quot;</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">state</span><span class="p">,</span> <span class="n">NDArray</span><span class="p">):</span>
<span class="k">return</span> <span class="n">state</span><span class="o">.</span><span class="n">as_in_context</span><span class="p">(</span><span class="n">context</span><span class="p">)</span>
<span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">state</span><span class="p">,</span> <span class="p">(</span><span class="nb">tuple</span><span class="p">,</span> <span class="nb">list</span><span class="p">)):</span>
<span class="n">synced_state</span> <span class="o">=</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">sync_state_context</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">context</span><span class="p">)</span> <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">state</span><span class="p">)</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">state</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
<span class="k">return</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">synced_state</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="nb">list</span><span class="p">(</span><span class="n">synced_state</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">return</span> <span class="n">state</span></div>
<div class="viewcode-block" id="Updater.set_states"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Updater.set_states">[docs]</a> <span class="k">def</span> <span class="nf">set_states</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">states</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Sets updater states.&quot;&quot;&quot;</span>
<span class="n">states</span> <span class="o">=</span> <span class="n">pickle</span><span class="o">.</span><span class="n">loads</span><span class="p">(</span><span class="n">states</span><span class="p">)</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">states</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)</span> <span class="ow">and</span> <span class="nb">len</span><span class="p">(</span><span class="n">states</span><span class="p">)</span> <span class="o">==</span> <span class="mi">2</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span> <span class="o">=</span> <span class="n">states</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states</span> <span class="o">=</span> <span class="n">states</span>
<span class="bp">self</span><span class="o">.</span><span class="n">states_synced</span> <span class="o">=</span> <span class="nb">dict</span><span class="o">.</span><span class="n">fromkeys</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="o">.</span><span class="n">keys</span><span class="p">(),</span> <span class="kc">False</span><span class="p">)</span></div>
<div class="viewcode-block" id="Updater.get_states"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.Updater.get_states">[docs]</a> <span class="k">def</span> <span class="nf">get_states</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dump_optimizer</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Gets updater states.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> dump_optimizer : bool, default False</span>
<span class="sd"> Whether to also save the optimizer itself. This would also save optimizer</span>
<span class="sd"> information such as learning rate and weight decay schedules.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">pickle</span><span class="o">.</span><span class="n">dumps</span><span class="p">((</span><span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">optimizer</span><span class="p">)</span> <span class="k">if</span> <span class="n">dump_optimizer</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">states</span><span class="p">)</span></div></div>
<div class="viewcode-block" id="get_updater"><a class="viewcode-back" href="../../../api/optimizer/index.html#mxnet.optimizer.get_updater">[docs]</a><span class="k">def</span> <span class="nf">get_updater</span><span class="p">(</span><span class="n">optimizer</span><span class="p">):</span>
<span class="sd">&quot;&quot;&quot;Returns a closure of the updater needed for kvstore.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> optimizer: Optimizer</span>
<span class="sd"> The optimizer.</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> updater: function</span>
<span class="sd"> The closure of the updater.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">Updater</span><span class="p">(</span><span class="n">optimizer</span><span class="p">)</span></div>
</pre></div>
<hr class="feedback-hr-top" />
<div class="feedback-container">
<div class="feedback-question">Did this page help you?</div>
<div class="feedback-answer-container">
<div class="feedback-answer yes-link" data-response="yes">Yes</div>
<div class="feedback-answer no-link" data-response="no">No</div>
</div>
<div class="feedback-thank-you">Thanks for your feedback!</div>
</div>
<hr class="feedback-hr-bottom" />
</div>
<div class="side-doc-outline">
<div class="side-doc-outline--content">
</div>
</div>
<div class="clearer"></div>
</div><div class="pagenation">
</div>
<footer class="site-footer h-card">
<div class="wrapper">
<div class="row">
<div class="col-4">
<h4 class="footer-category-title">Resources</h4>
<ul class="contact-list">
<li><a class="u-email" href="mailto:dev@mxnet.apache.org">Dev list</a></li>
<li><a class="u-email" href="mailto:user@mxnet.apache.org">User mailing list</a></li>
<li><a href="https://cwiki.apache.org/confluence/display/MXNET/Apache+MXNet+Home">Developer Wiki</a></li>
<li><a href="https://issues.apache.org/jira/projects/MXNET/issues">Jira Tracker</a></li>
<li><a href="https://github.com/apache/incubator-mxnet/labels/Roadmap">Github Roadmap</a></li>
<li><a href="https://medium.com/apache-mxnet">Blog</a></li>
<li><a href="https://discuss.mxnet.io">Forum</a></li>
<li><a href="/community/contribute">Contribute</a></li>
</ul>
</div>
<div class="col-4"><ul class="social-media-list"><li><a href="https://github.com/apache/incubator-mxnet"><svg class="svg-icon"><use xlink:href="../../../_static/minima-social-icons.svg#github"></use></svg> <span class="username">apache/incubator-mxnet</span></a></li><li><a href="https://www.twitter.com/apachemxnet"><svg class="svg-icon"><use xlink:href="../../../_static/minima-social-icons.svg#twitter"></use></svg> <span class="username">apachemxnet</span></a></li><li><a href="https://youtube.com/apachemxnet"><svg class="svg-icon"><use xlink:href="../../../_static/minima-social-icons.svg#youtube"></use></svg> <span class="username">apachemxnet</span></a></li></ul>
</div>
<div class="col-4 footer-text">
<p>A flexible and efficient library for deep learning.</p>
</div>
</div>
</div>
</footer>
<footer class="site-footer2">
<div class="wrapper">
<div class="row">
<div class="col-3">
<img src="../../../_static/apache_incubator_logo.png" class="footer-logo col-2">
</div>
<div class="footer-bottom-warning col-9">
<p>Apache MXNet is an effort undergoing incubation at <a href="http://www.apache.org/">The Apache Software Foundation</a> (ASF), <span style="font-weight:bold">sponsored by the <i>Apache Incubator</i></span>. Incubation is required
of all newly accepted projects until a further review indicates that the infrastructure,
communications, and decision making process have stabilized in a manner consistent with other
successful ASF projects. While incubation status is not necessarily a reflection of the completeness
or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
</p><p>"Copyright © 2017-2018, The Apache Software Foundation Apache MXNet, MXNet, Apache, the Apache
feather, and the Apache MXNet project logo are either registered trademarks or trademarks of the
Apache Software Foundation."</p>
</div>
</div>
</div>
</footer>
</body>
</html>