<div class="header">
<div class="headertitle">
<div class="title">tensor_gpu-inl.h</div> </div>
</div><!--header-->
<div class="contents">
<a href="tensor__gpu-inl_8h.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Licensed to the Apache Software Foundation (ASF) under one</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> * or more contributor license agreements. See the NOTICE file</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * distributed with this work for additional information</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> * regarding copyright ownership. The ASF licenses this file</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * to you under the Apache License, Version 2.0 (the</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * &quot;License&quot;); you may not use this file except in compliance</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"> * with the License. You may obtain a copy of the License at</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"> * http://www.apache.org/licenses/LICENSE-2.0</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> * Unless required by applicable law or agreed to in writing,</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> * software distributed under the License is distributed on an</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> * &quot;AS IS&quot; BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"> * KIND, either express or implied. 
See the License for the</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> * specific language governing permissions and limitations</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> * under the License.</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;</div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="preprocessor">#ifndef MSHADOW_TENSOR_GPU_INL_H_</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="preprocessor">#define MSHADOW_TENSOR_GPU_INL_H_</span></div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html">./base.h</a>&quot;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="tensor_8h.html">./tensor.h</a>&quot;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;</div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacemshadow.html">mshadow</a> {</div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;<span class="preprocessor">#if MSHADOW_USE_CUDA</span></div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00033"></a><span class="lineno"><a class="line" href="namespacemshadow.html#a425ff81d201a5d6de2c507c11ee63869"> 33</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a425ff81d201a5d6de2c507c11ee63869">InitTensorEngine&lt;gpu&gt;</a>(<span class="keywordtype">int</span> dev_id) {</div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160; cudaDeviceProp prop;</div><div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160; <span class="keywordtype">int</span> device_id = 0;</div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160; <span class="keywordtype">int</span> device_count = 0;</div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160; cudaGetDeviceCount(&amp;device_count);</div><div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160; CHECK_GT(device_count, 0) &lt;&lt; <span class="stringliteral">&quot;Cannot find CUDA device. 
Please check CUDA-Configuration&quot;</span>;</div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160; <span class="keywordflow">if</span> (dev_id &lt; 0) {</div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160; device_id = 0;</div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160; } <span class="keywordflow">else</span> {</div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160; device_id = dev_id;</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160; }</div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160; CHECK_LT(device_id, device_count) &lt;&lt; <span class="stringliteral">&quot;Incorrect Device ID&quot;</span>;</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaSetDevice(device_id));</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaGetDeviceProperties(&amp;prop, device_id));</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160;}</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00049"></a><span class="lineno"><a class="line" href="namespacemshadow.html#aac00a578d4eb8fc89263161eca8dc47b"> 49</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#aac00a578d4eb8fc89263161eca8dc47b">ShutdownTensorEngine&lt;gpu&gt;</a>(void) {</div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160;}</div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00052"></a><span class="lineno"><a class="line" href="namespacemshadow.html#ae3d85204767012fdd9fdddd27313e1fd"> 52</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#ae3d85204767012fdd9fdddd27313e1fd">SetDevice&lt;gpu&gt;</a>(<span class="keywordtype">int</span> devid) {</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaSetDevice(devid));</div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160;}</div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160;<span class="keyword">template</span>&lt;<span class="keywordtype">int</span> dim, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00056"></a><span class="lineno"><a class="line" href="namespacemshadow.html#abe33255a933f2196962c5f9bc37128ea"> 56</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#aef49c3cef522198322017315341ac689">AllocSpace</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, dim, DType&gt;</a> *obj, <span class="keywordtype">bool</span> <a class="code" href="namespacemshadow_1_1expr.html#a97c45c821ac8275dd75fa585f47b9e00">pad</a>) {</div><div class="line"><a 
name="l00057"></a><span class="lineno"> 57</span>&#160; <span class="keywordtype">size_t</span> pitch;</div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; <span class="comment">// common choice for cuda mem align unit is 32</span></div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; <span class="keywordflow">if</span> (pad &amp;&amp; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#a88cbcae11653307bfa4c99804320b638">size</a>(dim - 1) &gt;= <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#aaddfd904e59a1fc9fb07019cff73adee">MSHADOW_MIN_PAD_RATIO</a> * 32) {</div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaMallocPitch(reinterpret_cast&lt;void**&gt;(&amp;(obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">dptr_</a>)), &amp;pitch,</div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#a88cbcae11653307bfa4c99804320b638">size</a>(dim - 1) * <span class="keyword">sizeof</span>(DType),</div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#ad10c7414c5948e789e8761df2083c4e5">shape_</a>.FlatTo2D()[0]));</div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#afee556f188e29bbd0ecc45fe98d3c1c3">stride_</a> = <span class="keyword">static_cast&lt;</span><a class="code" href="namespacemshadow.html#adcbc2e1131386fccb1474b0bdf045926">index_t</a><span class="keyword">&gt;</span>(pitch / <span class="keyword">sizeof</span>(DType));</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; } <span class="keywordflow">else</span> {</div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#afee556f188e29bbd0ecc45fe98d3c1c3">stride_</a> = obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#a88cbcae11653307bfa4c99804320b638">size</a>(dim - 1);</div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaMallocPitch(reinterpret_cast&lt;void**&gt;(&amp;(obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">dptr_</a>)), &amp;pitch,</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#ad10c7414c5948e789e8761df2083c4e5">shape_</a>.Size() * <span class="keyword">sizeof</span>(DType), 1));</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; }</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160;}</div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;<span class="keyword">template</span>&lt;<span class="keywordtype">int</span> dim, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00071"></a><span class="lineno"><a class="line" href="namespacemshadow.html#af9165b1f0b61124039eda7e429e9502a"> 71</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" 
href="namespacemshadow.html#a3e8485c882dab873525b4b241e5db7ab">FreeSpace</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, dim, DType&gt;</a> *obj) {</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaFree(obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">dptr_</a>));</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160; obj-&gt;<a class="code" href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">dptr_</a> = NULL;</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160;}</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> A, <span class="keyword">typename</span> B, <span class="keywordtype">int</span> dim, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00076"></a><span class="lineno"><a class="line" href="namespacemshadow.html#a3a8bd22ed32f22db65c14988f75e7a8b"> 76</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;A, dim, DType&gt;</a> _dst,</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;B, dim, DType&gt;</a> _src,</div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; cudaMemcpyKind kind,</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; <a class="code" href="structmshadow_1_1Stream_3_01gpu_01_4.html">Stream&lt;gpu&gt;</a> *stream) {</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; CHECK_EQ(_dst.<a class="code" href="structmshadow_1_1Tensor.html#ad10c7414c5948e789e8761df2083c4e5">shape_</a>, _src.<a class="code" href="structmshadow_1_1Tensor.html#ad10c7414c5948e789e8761df2083c4e5">shape_</a>) &lt;&lt; <span class="stringliteral">&quot;Copy:shape mismatch&quot;</span>;</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;A, 2, DType&gt;</a> dst = _dst.<a class="code" href="structmshadow_1_1Tensor.html#a48a5927e810fbc45e43e92cfe397d9f2">FlatTo2D</a>();</div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;B, 2, DType&gt;</a> src = _src.<a class="code" href="structmshadow_1_1Tensor.html#a48a5927e810fbc45e43e92cfe397d9f2">FlatTo2D</a>();</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaMemcpy2DAsync(dst.<a class="code" href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">dptr_</a>, dst.<a class="code" href="structmshadow_1_1Tensor.html#afee556f188e29bbd0ecc45fe98d3c1c3">stride_</a> * <span class="keyword">sizeof</span>(DType),</div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; src.<a class="code" href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">dptr_</a>, src.<a class="code" 
href="structmshadow_1_1Tensor.html#afee556f188e29bbd0ecc45fe98d3c1c3">stride_</a> * <span class="keyword">sizeof</span>(DType),</div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; dst.<a class="code" href="structmshadow_1_1Tensor.html#a88cbcae11653307bfa4c99804320b638">size</a>(1) * <span class="keyword">sizeof</span>(DType),</div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; dst.<a class="code" href="structmshadow_1_1Tensor.html#a88cbcae11653307bfa4c99804320b638">size</a>(0), kind,</div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; <a class="code" href="structmshadow_1_1Stream.html">Stream&lt;gpu&gt;::GetStream</a>(stream)));</div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; <span class="comment">// use synchronize call behavior for zero stream</span></div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; <span class="keywordflow">if</span> (stream == NULL) {</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; <a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a>(cudaStreamSynchronize(0));</div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; }</div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160;}</div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160;<span class="keyword">template</span>&lt;<span class="keywordtype">int</span> dim, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00094"></a><span class="lineno"><a class="line" href="namespacemshadow.html#a46510affaaff69ed864ca07c380f5c08"> 94</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;cpu, dim, DType&gt;</a> dst,</div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, dim, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; <a class="code" href="structmshadow_1_1Stream_3_01gpu_01_4.html">Stream&lt;gpu&gt;</a> *stream) {</div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(dst, src, cudaMemcpyDeviceToHost, stream);</div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160;}</div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160;<span class="keyword">template</span>&lt;<span class="keywordtype">int</span> dim, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00100"></a><span class="lineno"><a class="line" href="namespacemshadow.html#a4680ae46659d9afffebf8f2a5891507d"> 100</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, dim, DType&gt;</a> dst,</div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 
dim, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; <a class="code" href="structmshadow_1_1Stream_3_01gpu_01_4.html">Stream&lt;gpu&gt;</a> *stream) {</div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(dst, src, cudaMemcpyDeviceToDevice, stream);</div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160;}</div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160;<span class="keyword">template</span>&lt;<span class="keywordtype">int</span> dim, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00106"></a><span class="lineno"><a class="line" href="namespacemshadow.html#a124c5c071588b2f00059129b04369918"> 106</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, dim, DType&gt;</a> dst,</div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;cpu, dim, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; <a class="code" href="structmshadow_1_1Stream_3_01gpu_01_4.html">Stream&lt;gpu&gt;</a> *stream) {</div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; <a class="code" href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">Copy</a>(dst, src, cudaMemcpyHostToDevice, stream);</div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160;}</div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160;<span class="preprocessor">#endif // MSHADOW_USE_CUDA</span></div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160;} <span class="comment">// namespace mshadow</span></div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160;</div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;<span class="comment">// the following part is included only if compiler is nvcc</span></div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160;<span class="preprocessor">#ifdef __CUDACC__</span></div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160;<span class="preprocessor">#include &quot;./cuda/tensor_gpu-inl.cuh&quot;</span></div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160;</div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacemshadow.html">mshadow</a> {</div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> Saver, <span class="keyword">typename</span> R, <span class="keywordtype">int</span> dim,</div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; <span class="keyword">typename</span> DType, <span class="keyword">typename</span> E, <span class="keywordtype">int</span> etype&gt;</div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a 
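The Copy overloads dispatch on the device tags of dst and src and all funnel into the pitched cudaMemcpy2DAsync above. A round-trip sketch (illustrative, assuming AllocSpace/FreeSpace from this header); with a NULL stream the synchronous branch runs, so both copies have completed when Copy returns:

void copy_demo() {
  using namespace mshadow;
  Tensor<cpu, 2, float> host(Shape2(4, 8));
  Tensor<gpu, 2, float> dev(Shape2(4, 8));
  AllocSpace(&host, false);
  AllocSpace(&dev, false);
  Stream<gpu> *stream = NULL;   // NULL -> cudaStreamSynchronize(0) after the async copy
  // ... fill host ...
  Copy(dev, host, stream);      // picks the cudaMemcpyHostToDevice overload
  Copy(host, dev, stream);      // picks the cudaMemcpyDeviceToHost overload
  FreeSpace(&dev);
  FreeSpace(&host);
}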
class="code" href="namespacemshadow.html#a4bdc2c62fd5dcee696cadd2351bf85e2">MapExp</a>(<a class="code" href="structmshadow_1_1TRValue.html">TRValue&lt;R, gpu, dim, DType&gt;</a> *dst,</div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1expr_1_1Exp.html">expr::Exp&lt;E, DType, etype&gt;</a> &amp;exp) {</div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; <a class="code" href="structmshadow_1_1expr_1_1TypeCheckPass.html">expr::TypeCheckPass&lt;expr::TypeCheck&lt;gpu, dim, DType, E&gt;::kMapPass</a>&gt;</div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; ::Error_All_Tensor_in_Exp_Must_Have_Same_Type();</div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; <a class="code" href="structmshadow_1_1Shape.html">Shape&lt;dim&gt;</a> eshape = <a class="code" href="structmshadow_1_1expr_1_1ShapeCheck.html#af44e6a3b97cf3b245bc1b7ad4cacb306">expr::ShapeCheck&lt;dim, E&gt;::Check</a>(exp.<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>());</div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; <a class="code" href="structmshadow_1_1Shape.html">Shape&lt;dim&gt;</a> dshape = <a class="code" href="structmshadow_1_1expr_1_1ShapeCheck.html#af44e6a3b97cf3b245bc1b7ad4cacb306">expr::ShapeCheck&lt;dim, R&gt;::Check</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>());</div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160; CHECK(eshape[0] == 0 || eshape == dshape)</div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; &lt;&lt; <span class="stringliteral">&quot;Assignment: Shape of Tensors are not consistent with target, &quot;</span></div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; &lt;&lt; <span class="stringliteral">&quot;eshape: &quot;</span> &lt;&lt; eshape &lt;&lt; <span class="stringliteral">&quot; dshape:&quot;</span> &lt;&lt; dshape;</div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; cuda::MapPlan&lt;Saver&gt;(<a class="code" href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">MakePlan</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()),</div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; <a class="code" href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">MakePlan</a>(exp.<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()),</div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; dshape.FlatTo2D(),</div><div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; <a class="code" href="structmshadow_1_1Stream.html">Stream&lt;gpu&gt;::GetStream</a>(<a class="code" href="structmshadow_1_1expr_1_1StreamInfo.html">expr::StreamInfo&lt;gpu, R&gt;::Get</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>())));</div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160;}</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160;</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160;<span 
class="keyword">template</span>&lt;<span class="keyword">typename</span> Saver, <span class="keyword">typename</span> Reducer,</div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; <span class="keyword">typename</span> R, <span class="keyword">typename</span> DType, <span class="keyword">typename</span> E, <span class="keywordtype">int</span> etype&gt;</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#ae1734eb7939fe9627de46d62494fe9dc">MapReduceKeepLowest</a>(<a class="code" href="structmshadow_1_1TRValue.html">TRValue&lt;R, gpu, 1, DType&gt;</a> *dst,</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1expr_1_1Exp.html">expr::Exp&lt;E, DType, etype&gt;</a> &amp;exp,</div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; DType scale) {</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; <a class="code" href="structmshadow_1_1expr_1_1TypeCheckPass.html">expr::TypeCheckPass&lt;expr::TypeCheck&lt;gpu, 1, DType, E&gt;::kRedPass</a>&gt;</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();</div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <a class="code" href="structmshadow_1_1Shape.html">Shape&lt;2&gt;</a> eshape = <a class="code" href="structmshadow_1_1expr_1_1ShapeCheck.html">expr::ShapeCheck&lt;expr::ExpInfo&lt;E&gt;::kDim</a>, E&gt;</div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; ::Check(exp.<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()).FlatTo2D();</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; <a class="code" href="structmshadow_1_1Shape.html">Shape&lt;1&gt;</a> dshape = <a class="code" href="structmshadow_1_1expr_1_1ShapeCheck.html#af44e6a3b97cf3b245bc1b7ad4cacb306">expr::ShapeCheck&lt;1, R&gt;::Check</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>());</div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; CHECK_EQ(eshape[1], dshape[0]) &lt;&lt; <span class="stringliteral">&quot;MapReduceKeepLowest::reduction dimension do not match&quot;</span>;</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; CHECK_NE(eshape[0], 0U) &lt;&lt; <span class="stringliteral">&quot;can not reduce over empty tensor&quot;</span>;</div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; cuda::MapReduceKeepLowest&lt;Saver, Reducer&gt;</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; (<a class="code" href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">MakePlan</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()), <a class="code" href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">MakePlan</a>(exp.<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()), scale, eshape,</div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; <a class="code" 
href="structmshadow_1_1Stream.html">Stream&lt;gpu&gt;::GetStream</a>(<a class="code" href="structmshadow_1_1expr_1_1StreamInfo.html">expr::StreamInfo&lt;gpu, R&gt;::Get</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>())));</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160;}</div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160;</div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> Saver, <span class="keyword">typename</span> Reducer, <span class="keywordtype">int</span> dimkeep,</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; <span class="keyword">typename</span> R, <span class="keyword">typename</span> DType, <span class="keyword">typename</span> E, <span class="keywordtype">int</span> etype&gt;</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a5fce5c2df842439cc7d2d7a90e2cf7d4">MapReduceKeepHighDim</a>(<a class="code" href="structmshadow_1_1TRValue.html">TRValue&lt;R, gpu, 1, DType&gt;</a> *dst,</div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1expr_1_1Exp.html">expr::Exp&lt;E, DType, etype&gt;</a> &amp;exp,</div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; DType scale) {</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <a class="code" href="structmshadow_1_1expr_1_1TypeCheckPass.html">expr::TypeCheckPass&lt;expr::TypeCheck&lt;gpu, dimkeep, DType, E&gt;::kRedPass</a>&gt;</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; <span class="keyword">typedef</span> <a class="code" href="structmshadow_1_1Shape.html">Shape&lt;expr::ExpInfo&lt;E&gt;::kDim</a>&gt; EShape;</div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; EShape eshape = <a class="code" href="structmshadow_1_1expr_1_1ShapeCheck.html">expr::ShapeCheck&lt;expr::ExpInfo&lt;E&gt;::kDim</a>, E&gt;</div><div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; ::Check(exp.<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>());</div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <a class="code" href="structmshadow_1_1Shape.html">Shape&lt;1&gt;</a> dshape = <a class="code" href="structmshadow_1_1expr_1_1ShapeCheck.html#af44e6a3b97cf3b245bc1b7ad4cacb306">expr::ShapeCheck&lt;1, R&gt;::Check</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>());</div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; CHECK_EQ(eshape[dimkeep], dshape[0]) &lt;&lt; <span class="stringliteral">&quot;MapReduceKeepHighDim::reduction dimension do not match&quot;</span>;</div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; <span class="comment">// use equvalent form</span></div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; <a class="code" 
href="structmshadow_1_1Shape.html">Shape&lt;4&gt;</a> pshape = <a class="code" href="namespacemshadow.html#a8fc5237744c6eda97f3070ddcb0c715e">Shape4</a>(eshape.ProdShape(0, dimkeep),</div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; eshape[dimkeep],</div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; eshape.ProdShape(dimkeep + 1, EShape::kSubdim),</div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; eshape[EShape::kSubdim]);</div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; <span class="comment">// call equavalent map red dim 2</span></div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; cuda::MapReduceKeepDim1&lt;Saver, Reducer&gt;</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; (<a class="code" href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">MakePlan</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()), <a class="code" href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">MakePlan</a>(exp.<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>()), scale, pshape,</div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <a class="code" href="structmshadow_1_1Stream.html">Stream&lt;gpu&gt;::GetStream</a>(<a class="code" href="structmshadow_1_1expr_1_1StreamInfo.html">expr::StreamInfo&lt;gpu, R&gt;::Get</a>(dst-&gt;<a class="code" href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">self</a>())));</div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160;}</div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a75161ef5ed964dcf6518e3a7e59e6fb6">Softmax</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> dst,</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a>&amp; src) {</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <a class="code" href="namespacemshadow.html#a75161ef5ed964dcf6518e3a7e59e6fb6">cuda::Softmax</a>(dst, src);</div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160;}</div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160;</div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a75161ef5ed964dcf6518e3a7e59e6fb6">Softmax</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 3, DType&gt;</a> dst,</div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; <span class="keyword">const</span> <a class="code" 
href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 3, DType&gt;</a>&amp; src) {</div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <a class="code" href="namespacemshadow.html#a75161ef5ed964dcf6518e3a7e59e6fb6">cuda::Softmax</a>(dst, src);</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160;}</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160;</div><div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">SoftmaxGrad</a>(<span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;dst,</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, DType&gt;</a> &amp;label) {</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">cuda::SoftmaxGrad</a>(dst, src, label);</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160;}</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160;</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a4fb3739d11e671f9809458cf6ada1e64">SmoothSoftmaxGrad</a>(<span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;dst,</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, DType&gt;</a> &amp;label,</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; <span class="keyword">const</span> <span class="keywordtype">float</span> alpha) {</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; <a class="code" href="namespacemshadow.html#a4fb3739d11e671f9809458cf6ada1e64">cuda::SmoothSoftmaxGrad</a>(dst, src, label, alpha);</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160;}</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160;</div><div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00203"></a><span class="lineno"> 
203</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">SoftmaxGrad</a>(<span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;dst,</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, DType&gt;</a> &amp;label,</div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; <span class="keyword">const</span> DType &amp;ignore_label) {</div><div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">cuda::SoftmaxGrad</a>(dst, src, label, ignore_label);</div><div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160;}</div><div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160;</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a4fb3739d11e671f9809458cf6ada1e64">SmoothSoftmaxGrad</a>(<span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;dst,</div><div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, DType&gt;</a> &amp;label,</div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; <span class="keyword">const</span> DType &amp;ignore_label,</div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; <span class="keyword">const</span> <span class="keywordtype">float</span> alpha) {</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; <a class="code" href="namespacemshadow.html#a4fb3739d11e671f9809458cf6ada1e64">cuda::SmoothSoftmaxGrad</a>(dst, src, label, ignore_label, alpha);</div><div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160;}</div><div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160;</div><div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">SoftmaxGrad</a>(<span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 3, DType&gt;</a> &amp;dst,</div><div class="line"><a 
name="l00221"></a><span class="lineno"> 221</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 3, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;label) {</div><div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">cuda::SoftmaxGrad</a>(dst, src, label);</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160;}</div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160;</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">SoftmaxGrad</a>(<span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 3, DType&gt;</a> &amp;dst,</div><div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 3, DType&gt;</a> &amp;src,</div><div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;label,</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160; <span class="keyword">const</span> DType &amp;ignore_label) {</div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160; <a class="code" href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">cuda::SoftmaxGrad</a>(dst, src, label, ignore_label);</div><div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160;}</div><div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160;</div><div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160;<span class="keyword">template</span>&lt;<span class="keywordtype">bool</span> clip, <span class="keyword">typename</span> IndexType, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a802d44c652195053e87b63881c7d4d1c">AddTakeGrad</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> dst,</div><div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, IndexType&gt;</a>&amp; index,</div><div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src) {</div><div class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160; cuda::AddTakeGrad&lt;clip, IndexType, DType&gt;(dst, index, src);</div><div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160;}</div><div 
class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160;</div><div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> IndexType, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#ad2320213151030353ae90f92a0da808d">AddTakeGradLargeBatch</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> dst,</div><div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, IndexType&gt;</a>&amp; sorted,</div><div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, IndexType&gt;</a>&amp; index,</div><div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src) {</div><div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160; <a class="code" href="namespacemshadow.html#ad2320213151030353ae90f92a0da808d">cuda::AddTakeGradLargeBatch</a>(dst, sorted, index, src);</div><div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160;}</div><div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160;</div><div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> KDType, <span class="keyword">typename</span> VDType&gt;</div><div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a3476fdf7d39add6f57beddafbc1b9625">SortByKey</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, KDType&gt;</a> keys, <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, VDType&gt;</a> values,</div><div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160; <span class="keywordtype">bool</span> is_ascend) {</div><div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160; <a class="code" href="namespacemshadow.html#a3476fdf7d39add6f57beddafbc1b9625">cuda::SortByKey</a>(keys, values, is_ascend);</div><div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160;}</div><div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160;</div><div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> IndexType, <span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacemshadow.html#a77348ebaaac4581773b4fe78448c8fa6">IndexFill</a>(<a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> dst,</div><div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160; <span class="keyword">const</span> <a class="code" 
href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 1, IndexType&gt;</a>&amp; index,</div><div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160; <span class="keyword">const</span> <a class="code" href="structmshadow_1_1Tensor.html">Tensor&lt;gpu, 2, DType&gt;</a> &amp;src) {</div><div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160; <a class="code" href="namespacemshadow.html#a77348ebaaac4581773b4fe78448c8fa6">cuda::IndexFill</a>(dst, index, src);</div><div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160;}</div><div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160;} <span class="comment">// namespace mshadow</span></div><div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160;<span class="preprocessor">#endif // __CUDACC__</span></div><div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160;<span class="preprocessor">#endif // MSHADOW_TENSOR_GPU_INL_H_</span></div><div class="ttc" id="namespacemshadow_html_a3e8485c882dab873525b4b241e5db7ab"><div class="ttname"><a href="namespacemshadow.html#a3e8485c882dab873525b4b241e5db7ab">mshadow::FreeSpace</a></div><div class="ttdeci">void FreeSpace(Tensor&lt; cpu, dim, DType &gt; *obj)</div><div class="ttdoc">CPU/GPU: free the space of tensor, will set obj.dptr to NULL. </div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:140</div></div>
<div class="ttc" id="namespacemshadow_html_a77348ebaaac4581773b4fe78448c8fa6"><div class="ttname"><a href="namespacemshadow.html#a77348ebaaac4581773b4fe78448c8fa6">mshadow::IndexFill</a></div><div class="ttdeci">void IndexFill(Tensor&lt; cpu, 2, DType &gt; dst, const Tensor&lt; cpu, 1, IndexType &gt; &amp;index, const Tensor&lt; cpu, 2, DType &gt; &amp;src)</div><div class="ttdoc">CPU/GPU: Fill the values of the destination matrix to specific rows in the source matrix...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:547</div></div>
<div class="ttc" id="namespacemshadow_html_a3409c8a836e7dab83bec25556164261c"><div class="ttname"><a href="namespacemshadow.html#a3409c8a836e7dab83bec25556164261c">mshadow::SoftmaxGrad</a></div><div class="ttdeci">void SoftmaxGrad(Tensor&lt; cpu, 2, DType &gt; dst, const Tensor&lt; cpu, 2, DType &gt; &amp;src, const Tensor&lt; cpu, 1, DType &gt; &amp;label)</div><div class="ttdoc">CPU/GPU: softmax gradient. </div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:306</div></div>
<div class="ttc" id="namespacemshadow_html_a4fb3739d11e671f9809458cf6ada1e64"><div class="ttname"><a href="namespacemshadow.html#a4fb3739d11e671f9809458cf6ada1e64">mshadow::SmoothSoftmaxGrad</a></div><div class="ttdeci">void SmoothSoftmaxGrad(Tensor&lt; cpu, 2, DType &gt; dst, const Tensor&lt; cpu, 2, DType &gt; &amp;src, const Tensor&lt; cpu, 1, DType &gt; &amp;label, const float alpha)</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:323</div></div>
<div class="ttc" id="namespacemshadow_1_1expr_html_a97c45c821ac8275dd75fa585f47b9e00"><div class="ttname"><a href="namespacemshadow_1_1expr.html#a97c45c821ac8275dd75fa585f47b9e00">mshadow::expr::pad</a></div><div class="ttdeci">PaddingExp&lt; SrcExp, DType, ExpInfo&lt; SrcExp &gt;::kDim &gt; pad(const Exp&lt; SrcExp, DType, etype &gt; &amp;src, index_t pad)</div><div class="ttdoc">padding expression, pad a image with zeros on boundaries, padding affects shape[0], and shape[1] </div><div class="ttdef"><b>Definition:</b> pad.h:71</div></div>
<div class="ttc" id="structmshadow_1_1Tensor_html_ad86d6759c585efb5229b3a0659973838"><div class="ttname"><a href="structmshadow_1_1Tensor.html#ad86d6759c585efb5229b3a0659973838">mshadow::Tensor::dptr_</a></div><div class="ttdeci">DType * dptr_</div><div class="ttdoc">pointer to the data </div><div class="ttdef"><b>Definition:</b> tensor.h:434</div></div>
<div class="ttc" id="structmshadow_1_1TRValue_html"><div class="ttname"><a href="structmshadow_1_1TRValue.html">mshadow::TRValue</a></div><div class="ttdoc">Tensor RValue, this is the super type of all kinds of possible tensors. </div><div class="ttdef"><b>Definition:</b> tensor.h:409</div></div>
<div class="ttc" id="structmshadow_1_1expr_1_1Exp_html_adb7e4afd0baed78d66cff87de0a6621f"><div class="ttname"><a href="structmshadow_1_1expr_1_1Exp.html#adb7e4afd0baed78d66cff87de0a6621f">mshadow::expr::Exp::self</a></div><div class="ttdeci">const SubType &amp; self(void) const</div><div class="ttdef"><b>Definition:</b> expression.h:82</div></div>
<div class="ttc" id="structmshadow_1_1expr_1_1TypeCheckPass_html"><div class="ttname"><a href="structmshadow_1_1expr_1_1TypeCheckPass.html">mshadow::expr::TypeCheckPass</a></div><div class="ttdoc">used to help static type check </div><div class="ttdef"><b>Definition:</b> expr_engine-inl.h:330</div></div>
<div class="ttc" id="namespacemshadow_html_a0435f827863555e4ed19bbc0cc81fa39"><div class="ttname"><a href="namespacemshadow.html#a0435f827863555e4ed19bbc0cc81fa39">mshadow::Copy</a></div><div class="ttdeci">void Copy(Tensor&lt; cpu, dim, DType &gt; dst, const Tensor&lt; cpu, dim, DType &gt; &amp;src, Stream&lt; cpu &gt; *stream=NULL)</div><div class="ttdoc">copy data from one tensor to another, with same shape </div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:145</div></div>
<div class="ttc" id="structmshadow_1_1Shape_html"><div class="ttname"><a href="structmshadow_1_1Shape.html">mshadow::Shape&lt; dim &gt;</a></div></div>
<div class="ttc" id="namespacemshadow_html_a4bdc2c62fd5dcee696cadd2351bf85e2"><div class="ttname"><a href="namespacemshadow.html#a4bdc2c62fd5dcee696cadd2351bf85e2">mshadow::MapExp</a></div><div class="ttdeci">void MapExp(TRValue&lt; R, cpu, dim, DType &gt; *dst, const expr::Exp&lt; E, DType, etype &gt; &amp;exp)</div><div class="ttdoc">CPU/GPU: map a expression to a tensor, this function calls MapPlan. </div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:207</div></div>
<div class="ttc" id="structmshadow_1_1Stream_3_01gpu_01_4_html"><div class="ttname"><a href="structmshadow_1_1Stream_3_01gpu_01_4.html">mshadow::Stream&lt; gpu &gt;</a></div><div class="ttdef"><b>Definition:</b> stream_gpu-inl.h:37</div></div>
<div class="ttc" id="structmshadow_1_1Tensor_html_ad10c7414c5948e789e8761df2083c4e5"><div class="ttname"><a href="structmshadow_1_1Tensor.html#ad10c7414c5948e789e8761df2083c4e5">mshadow::Tensor::shape_</a></div><div class="ttdeci">Shape&lt; dimension &gt; shape_</div><div class="ttdoc">shape of the tensor </div><div class="ttdef"><b>Definition:</b> tensor.h:436</div></div>
<div class="ttc" id="namespacemshadow_html_a8fc5237744c6eda97f3070ddcb0c715e"><div class="ttname"><a href="namespacemshadow.html#a8fc5237744c6eda97f3070ddcb0c715e">mshadow::Shape4</a></div><div class="ttdeci">MSHADOW_XINLINE Shape&lt; 4 &gt; Shape4(index_t s0, index_t s1, index_t s2, index_t s3)</div><div class="ttdoc">construct a four dimension shape, stride will equal s0 </div><div class="ttdef"><b>Definition:</b> tensor.h:240</div></div>
<div class="ttc" id="namespacemshadow_html_a3476fdf7d39add6f57beddafbc1b9625"><div class="ttname"><a href="namespacemshadow.html#a3476fdf7d39add6f57beddafbc1b9625">mshadow::SortByKey</a></div><div class="ttdeci">void SortByKey(Tensor&lt; cpu, 1, KDType &gt; keys, Tensor&lt; cpu, 1, VDType &gt; values, bool is_ascend=true)</div><div class="ttdoc">CPU/GPU: Sort key-value pairs stored in separate places. (Stable sort is performed!) ...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:558</div></div>
<div class="ttc" id="namespacemshadow_html_a75161ef5ed964dcf6518e3a7e59e6fb6"><div class="ttname"><a href="namespacemshadow.html#a75161ef5ed964dcf6518e3a7e59e6fb6">mshadow::Softmax</a></div><div class="ttdeci">void Softmax(Tensor&lt; cpu, 2, DType &gt; dst, const Tensor&lt; cpu, 2, DType &gt; &amp;energy)</div><div class="ttdoc">CPU/GPU: normalize softmax: dst[i][j] = exp(energy[i][j]) /(sum_j exp(energy[i][j])) ...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:483</div></div>
<div class="ttc" id="3rdparty_2mshadow_2mshadow_2base_8h_html_a8f433b4dd005a854eec58178ffd3d4bd"><div class="ttname"><a href="3rdparty_2mshadow_2mshadow_2base_8h.html#a8f433b4dd005a854eec58178ffd3d4bd">MSHADOW_CUDA_CALL</a></div><div class="ttdeci">#define MSHADOW_CUDA_CALL(func)</div><div class="ttdoc">Protected cuda call in mshadow. </div><div class="ttdef"><b>Definition:</b> base.h:278</div></div>
<div class="ttc" id="namespacemshadow_html_ae1734eb7939fe9627de46d62494fe9dc"><div class="ttname"><a href="namespacemshadow.html#ae1734eb7939fe9627de46d62494fe9dc">mshadow::MapReduceKeepLowest</a></div><div class="ttdeci">void MapReduceKeepLowest(TRValue&lt; R, cpu, 1, DType &gt; *dst, const expr::Exp&lt; E, DType, etype &gt; &amp;exp, DType scale=1)</div><div class="ttdoc">CPU/GPU: map a expression, do reduction to 1D Tensor in lowest dimension (dimension 0) ...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:223</div></div>
<div class="ttc" id="structmshadow_1_1expr_1_1ShapeCheck_html_af44e6a3b97cf3b245bc1b7ad4cacb306"><div class="ttname"><a href="structmshadow_1_1expr_1_1ShapeCheck.html#af44e6a3b97cf3b245bc1b7ad4cacb306">mshadow::expr::ShapeCheck::Check</a></div><div class="ttdeci">static Shape&lt; dim &gt; Check(const E &amp;t)</div></div>
<div class="ttc" id="tensor_8h_html"><div class="ttname"><a href="tensor_8h.html">tensor.h</a></div><div class="ttdoc">header file of tensor data structure and functions This lib requires explicit memory allocation and d...</div></div>
<div class="ttc" id="structmshadow_1_1expr_1_1StreamInfo_html"><div class="ttname"><a href="structmshadow_1_1expr_1_1StreamInfo.html">mshadow::expr::StreamInfo</a></div><div class="ttdef"><b>Definition:</b> expr_engine-inl.h:345</div></div>
<div class="ttc" id="namespacemshadow_html_adcbc2e1131386fccb1474b0bdf045926"><div class="ttname"><a href="namespacemshadow.html#adcbc2e1131386fccb1474b0bdf045926">mshadow::index_t</a></div><div class="ttdeci">int32_t index_t</div><div class="ttdoc">type that will be used for index </div><div class="ttdef"><b>Definition:</b> base.h:343</div></div>
<div class="ttc" id="namespacemshadow_html_aef49c3cef522198322017315341ac689"><div class="ttname"><a href="namespacemshadow.html#aef49c3cef522198322017315341ac689">mshadow::AllocSpace</a></div><div class="ttdeci">void AllocSpace(Tensor&lt; cpu, dim, DType &gt; *obj, bool pad=MSHADOW_ALLOC_PAD)</div><div class="ttdoc">CPU/CPU: allocate space for CTensor, according to the shape in the obj this function is responsible t...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:116</div></div>
<div class="ttc" id="structmshadow_1_1Tensor_html_a48a5927e810fbc45e43e92cfe397d9f2"><div class="ttname"><a href="structmshadow_1_1Tensor.html#a48a5927e810fbc45e43e92cfe397d9f2">mshadow::Tensor::FlatTo2D</a></div><div class="ttdeci">MSHADOW_XINLINE Tensor&lt; Device, 2, DType &gt; FlatTo2D(void) const</div><div class="ttdoc">flatten the tensor to 2 dimension, collapse the higher dimensions together </div><div class="ttdef"><b>Definition:</b> tensor.h:519</div></div>
<div class="ttc" id="structmshadow_1_1Tensor_html_a88cbcae11653307bfa4c99804320b638"><div class="ttname"><a href="structmshadow_1_1Tensor.html#a88cbcae11653307bfa4c99804320b638">mshadow::Tensor::size</a></div><div class="ttdeci">MSHADOW_XINLINE index_t size(int idx) const</div><div class="ttdoc">return size of i-th dimension, start counting from highest dimension </div><div class="ttdef"><b>Definition:</b> tensor.h:505</div></div>
<div class="ttc" id="namespacemshadow_html_aac00a578d4eb8fc89263161eca8dc47b"><div class="ttname"><a href="namespacemshadow.html#aac00a578d4eb8fc89263161eca8dc47b">mshadow::ShutdownTensorEngine&lt; gpu &gt;</a></div><div class="ttdeci">void ShutdownTensorEngine&lt; gpu &gt;(void)</div><div class="ttdef"><b>Definition:</b> tensor_gpu-inl.h:49</div></div>
<div class="ttc" id="namespacemshadow_html_ad2320213151030353ae90f92a0da808d"><div class="ttname"><a href="namespacemshadow.html#ad2320213151030353ae90f92a0da808d">mshadow::AddTakeGradLargeBatch</a></div><div class="ttdeci">void AddTakeGradLargeBatch(Tensor&lt; cpu, 2, DType &gt; dst, const Tensor&lt; cpu, 1, IndexType &gt; &amp;sorted, const Tensor&lt; cpu, 1, IndexType &gt; &amp;index, const Tensor&lt; cpu, 2, DType &gt; &amp;src)</div><div class="ttdoc">CPU/GPU: Gradient accumulate of embedding matrix. dst[sorted[i]] += src[index[i]] Called when the bat...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:537</div></div>
<div class="ttc" id="structmshadow_1_1expr_1_1ShapeCheck_html"><div class="ttname"><a href="structmshadow_1_1expr_1_1ShapeCheck.html">mshadow::expr::ShapeCheck</a></div><div class="ttdoc">runtime shape checking template get the shape of an expression, report error if shape mismatch ...</div><div class="ttdef"><b>Definition:</b> expr_engine-inl.h:364</div></div>
<div class="ttc" id="namespacemshadow_html_a425ff81d201a5d6de2c507c11ee63869"><div class="ttname"><a href="namespacemshadow.html#a425ff81d201a5d6de2c507c11ee63869">mshadow::InitTensorEngine&lt; gpu &gt;</a></div><div class="ttdeci">void InitTensorEngine&lt; gpu &gt;(int dev_id)</div><div class="ttdef"><b>Definition:</b> tensor_gpu-inl.h:33</div></div>
<div class="ttc" id="namespacemshadow_html_a5fce5c2df842439cc7d2d7a90e2cf7d4"><div class="ttname"><a href="namespacemshadow.html#a5fce5c2df842439cc7d2d7a90e2cf7d4">mshadow::MapReduceKeepHighDim</a></div><div class="ttdeci">void MapReduceKeepHighDim(TRValue&lt; R, cpu, 1, DType &gt; *dst, const expr::Exp&lt; E, DType, etype &gt; &amp;exp, DType scale=1)</div><div class="ttdoc">CPU/GPU: map a expression, do reduction to 1D Tensor in third dimension (dimension 2) ...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:250</div></div>
<div class="ttc" id="structmshadow_1_1expr_1_1Exp_html"><div class="ttname"><a href="structmshadow_1_1expr_1_1Exp.html">mshadow::expr::Exp</a></div><div class="ttdoc">defines how expression exp can be evaluated and stored into dst </div><div class="ttdef"><b>Definition:</b> expression.h:79</div></div>
<div class="ttc" id="namespacemshadow_1_1expr_html_aefd008a0bf012dc8fb99d9bbcca7a078"><div class="ttname"><a href="namespacemshadow_1_1expr.html#aefd008a0bf012dc8fb99d9bbcca7a078">mshadow::expr::MakePlan</a></div><div class="ttdeci">Plan&lt; BinaryMapExp&lt; OP, TA, TB, DType, etype &gt;, DType &gt; MakePlan(const BinaryMapExp&lt; OP, TA, TB, DType, etype &gt; &amp;e)</div><div class="ttdef"><b>Definition:</b> expr_engine-inl.h:239</div></div>
<div class="ttc" id="namespacemshadow_html_ae3d85204767012fdd9fdddd27313e1fd"><div class="ttname"><a href="namespacemshadow.html#ae3d85204767012fdd9fdddd27313e1fd">mshadow::SetDevice&lt; gpu &gt;</a></div><div class="ttdeci">void SetDevice&lt; gpu &gt;(int devid)</div><div class="ttdef"><b>Definition:</b> tensor_gpu-inl.h:52</div></div>
<div class="ttc" id="namespacemshadow_html_a802d44c652195053e87b63881c7d4d1c"><div class="ttname"><a href="namespacemshadow.html#a802d44c652195053e87b63881c7d4d1c">mshadow::AddTakeGrad</a></div><div class="ttdeci">void AddTakeGrad(Tensor&lt; cpu, 2, DType &gt; dst, const Tensor&lt; cpu, 1, IndexType &gt; &amp;index, const Tensor&lt; cpu, 2, DType &gt; &amp;src)</div><div class="ttdoc">CPU/GPU: Gradient accumulate of embedding matrix. dst[index[i]] += src[i] Called when the featuredim ...</div><div class="ttdef"><b>Definition:</b> tensor_cpu-inl.h:516</div></div>
<div class="ttc" id="namespacemshadow_html"><div class="ttname"><a href="namespacemshadow.html">mshadow</a></div><div class="ttdoc">overloaded + operator between half_t and bf16_t </div><div class="ttdef"><b>Definition:</b> base.h:334</div></div>
<div class="ttc" id="structmshadow_1_1Tensor_html_afee556f188e29bbd0ecc45fe98d3c1c3"><div class="ttname"><a href="structmshadow_1_1Tensor.html#afee556f188e29bbd0ecc45fe98d3c1c3">mshadow::Tensor::stride_</a></div><div class="ttdeci">index_t stride_</div><div class="ttdoc">storing the stride information in x dimension this is used to deal with pitch allocation in gpu or ss...</div><div class="ttdef"><b>Definition:</b> tensor.h:441</div></div>
<div class="ttc" id="structmshadow_1_1Tensor_html"><div class="ttname"><a href="structmshadow_1_1Tensor.html">mshadow::Tensor</a></div><div class="ttdoc">general tensor </div><div class="ttdef"><b>Definition:</b> tensor.h:420</div></div>
<div class="ttc" id="3rdparty_2mshadow_2mshadow_2base_8h_html"><div class="ttname"><a href="3rdparty_2mshadow_2mshadow_2base_8h.html">base.h</a></div></div>
<div class="ttc" id="3rdparty_2mshadow_2mshadow_2base_8h_html_aaddfd904e59a1fc9fb07019cff73adee"><div class="ttname"><a href="3rdparty_2mshadow_2mshadow_2base_8h.html#aaddfd904e59a1fc9fb07019cff73adee">MSHADOW_MIN_PAD_RATIO</a></div><div class="ttdeci">#define MSHADOW_MIN_PAD_RATIO</div><div class="ttdoc">x dimension of data must be bigger pad_size * ratio to be alloced padded memory, otherwise use tide a...</div><div class="ttdef"><b>Definition:</b> base.h:83</div></div>
<div class="ttc" id="structmshadow_1_1Stream_html"><div class="ttname"><a href="structmshadow_1_1Stream.html">mshadow::Stream</a></div><div class="ttdoc">computaion stream structure, used for asynchronous computations </div><div class="ttdef"><b>Definition:</b> tensor.h:383</div></div>
</div><!-- fragment --></div><!-- contents -->
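<div class="contents">
<p>The wrappers above only forward to the <code>cuda::</code> kernels, so host code works entirely through the <code>mshadow::</code> entry points. Below is a minimal, hypothetical usage sketch (it is not part of this header): it assumes device 0, a 4x10 score matrix, and compilation with nvcc so that <code>__CUDACC__</code> is defined and the GPU specializations are available.</p>
<pre class="fragment">
// Hypothetical usage sketch -- not part of tensor_gpu-inl.h.
// Assumes device 0 and nvcc compilation (__CUDACC__).
#include "mshadow/tensor.h"
using namespace mshadow;

int main() {
  InitTensorEngine&lt;gpu&gt;(0);                       // bind this thread to device 0
  Stream&lt;gpu&gt; *s = NewStream&lt;gpu&gt;(0);             // asynchronous compute stream

  Tensor&lt;cpu, 2, float&gt; h_energy(Shape2(4, 10));  // host-side scores
  Tensor&lt;gpu, 2, float&gt; d_energy(Shape2(4, 10));  // device-side scores
  Tensor&lt;gpu, 2, float&gt; d_prob(Shape2(4, 10));    // device-side probabilities
  AllocSpace(&amp;h_energy);
  AllocSpace(&amp;d_energy);
  AllocSpace(&amp;d_prob);
  d_energy.set_stream(s);
  d_prob.set_stream(s);

  h_energy = 0.0f;              // fill the host tensor (stand-in for real data)
  Copy(d_energy, h_energy, s);  // host to device transfer on stream s
  Softmax(d_prob, d_energy);    // d_prob[i][j] = exp(e[i][j]) / sum_j exp(e[i][j])
  Copy(h_energy, d_prob, s);    // device to host, reusing the host buffer

  FreeSpace(&amp;d_prob);
  FreeSpace(&amp;d_energy);
  FreeSpace(&amp;h_energy);
  DeleteStream&lt;gpu&gt;(s);
  ShutdownTensorEngine&lt;gpu&gt;();
  return 0;
}
</pre>
</div>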
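<div class="contents">
<p><code>SortByKey</code>, <code>AddTakeGradLargeBatch</code>, and <code>IndexFill</code> together cover the sparse-update path of an embedding layer: indices are sorted so duplicate rows become contiguous, then accumulated following the <code>dst[sorted[i]] += src[index[i]]</code> contract documented above. The helpers below are a hedged sketch only; their names, the <code>int</code> index type, and the shape comments are illustrative assumptions, not part of mshadow.</p>
<pre class="fragment">
// Hypothetical helpers -- names, shapes and index type are illustrative only.
// Tensors are assumed to be already allocated on the GPU with streams set.
#include "mshadow/tensor.h"
using namespace mshadow;

// Accumulate gradients of an embedding matrix:
//   weight_grad[row_ids[i]] += out_grad[positions[i]]
// weight_grad: [num_words, dim], out_grad: [batch, dim],
// row_ids / positions: [batch], positions initially 0..batch-1.
template&lt;typename DType&gt;
void AccumulateEmbeddingGrad(Tensor&lt;gpu, 2, DType&gt; weight_grad,
                             Tensor&lt;gpu, 1, int&gt; row_ids,
                             Tensor&lt;gpu, 1, int&gt; positions,
                             const Tensor&lt;gpu, 2, DType&gt; &amp;out_grad) {
  // Sort word ids ascending; positions is permuted alongside as the value array.
  SortByKey(row_ids, positions, true);
  // Duplicates in row_ids are now contiguous, as the large-batch kernel expects.
  AddTakeGradLargeBatch(weight_grad, row_ids, positions, out_grad);
}

// Overwrite (rather than accumulate) selected rows: weight[row_ids[i]] = new_rows[i].
template&lt;typename DType&gt;
void OverwriteRows(Tensor&lt;gpu, 2, DType&gt; weight,
                   const Tensor&lt;gpu, 1, int&gt; &amp;row_ids,
                   const Tensor&lt;gpu, 2, DType&gt; &amp;new_rows) {
  IndexFill(weight, row_ids, new_rows);
}
</pre>
</div>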
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated on Thu Jan 5 2023 00:58:42 for mxnet by &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.13
</small></address>
</body>
</html>