blob: b8ba643e4b9bbc458c392ea44db04c5f9be043d2 [file] [log] [blame]
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.13"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>mxnet: /work/mxnet/src/common/cuda_utils.h Source File</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">mxnet
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.13 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
</script>
<div id="main-nav"></div>
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="dir_fdedb0aba14d44ce9d99bc100e026e6a.html">common</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="headertitle">
<div class="title">cuda_utils.h</div> </div>
</div><!--header-->
<div class="contents">
<a href="cuda__utils_8h.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Licensed to the Apache Software Foundation (ASF) under one</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> * or more contributor license agreements. See the NOTICE file</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * distributed with this work for additional information</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> * regarding copyright ownership. The ASF licenses this file</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * to you under the Apache License, Version 2.0 (the</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * &quot;License&quot;); you may not use this file except in compliance</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"> * with the License. You may obtain a copy of the License at</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"> * http://www.apache.org/licenses/LICENSE-2.0</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> * Unless required by applicable law or agreed to in writing,</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> * software distributed under the License is distributed on an</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> * &quot;AS IS&quot; BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"> * KIND, either express or implied. See the License for the</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> * specific language governing permissions and limitations</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> * under the License.</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;</div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#ifndef MXNET_COMMON_CUDA_UTILS_H_</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="preprocessor">#define MXNET_COMMON_CUDA_UTILS_H_</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;</div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &lt;dmlc/logging.h&gt;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="parameter_8h.html">dmlc/parameter.h</a>&gt;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="optional_8h.html">dmlc/optional.h</a>&gt;</span></div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html">mshadow/base.h</a>&gt;</span></div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="libinfo_8h.html">mxnet/libinfo.h</a>&gt;</span></div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160;</div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160;<span class="preprocessor">#ifdef __JETBRAINS_IDE__</span></div><div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160;<span class="preprocessor">#define __CUDACC__ 1</span></div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160;<span class="preprocessor">#define __host__</span></div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160;<span class="preprocessor">#define __device__</span></div><div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160;<span class="preprocessor">#define __global__</span></div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160;<span class="preprocessor">#define __forceinline__</span></div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160;<span class="preprocessor">#define __shared__</span></div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> __syncthreads() {}</div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> __threadfence_block() {}</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">class</span> T&gt; <span class="keyword">inline</span> T __clz(<span class="keyword">const</span> T val) { <span class="keywordflow">return</span> val; }</div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160;<span class="keyword">struct </span>__cuda_fake_struct { <span class="keywordtype">int</span> x; <span class="keywordtype">int</span> y; <span class="keywordtype">int</span> z; };</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160;<span class="keyword">extern</span> __cuda_fake_struct blockDim;</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160;<span class="keyword">extern</span> __cuda_fake_struct threadIdx;</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160;<span class="keyword">extern</span> __cuda_fake_struct blockIdx;</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160;</div><div class="line"><a name="l00050"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a2117b58e19182dff91ad3558e650541d"> 50</a></span>&#160;<span class="preprocessor">#define QUOTE(x) #x</span></div><div class="line"><a name="l00051"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a257a331aabc15f6c701df3cff96f1b10"> 51</a></span>&#160;<span class="preprocessor">#define QUOTEVALUE(x) QUOTE(x)</span></div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160;</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;<span class="preprocessor">#if MXNET_USE_CUDA</span></div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160;</div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160;<span class="preprocessor">#include &lt;cuda_runtime.h&gt;</span></div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160;<span class="preprocessor">#include &lt;cublas_v2.h&gt;</span></div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160;<span class="preprocessor">#include &lt;curand.h&gt;</span></div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160;</div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160;<span class="preprocessor">#include &lt;vector&gt;</span></div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160;</div><div class="line"><a name="l00061"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ac2d16cdf196c75879d4acda60406e0ef"> 61</a></span>&#160;<span class="preprocessor">#define STATIC_ASSERT_CUDA_VERSION_GE(min_version) \</span></div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160;<span class="preprocessor"> static_assert(CUDA_VERSION &gt;= min_version, &quot;Compiled-against CUDA version &quot; \</span></div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160;<span class="preprocessor"> QUOTEVALUE(CUDA_VERSION) &quot; is too old, please upgrade system to version &quot; \</span></div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160;<span class="preprocessor"> QUOTEVALUE(min_version) &quot; or later.&quot;)</span></div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160;</div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;<span class="preprocessor">#ifdef __CUDACC__</span></div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160;<span class="keyword">inline</span> __device__ <span class="keywordtype">bool</span> __is_supported_cuda_architecture() {</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;<span class="preprocessor">#if defined(__CUDA_ARCH__) &amp;&amp; __CUDA_ARCH__ &lt; 300</span></div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;<span class="preprocessor">#error &quot;Fermi and earlier GPU architectures are not supported (architecture versions less than 3.0)&quot;</span></div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160;<span class="preprocessor">#else</span></div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160; <span class="keywordflow">return</span> <span class="keyword">true</span>;</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160;<span class="preprocessor">#endif // __CUDA_ARCH__ &lt; 300</span></div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160;}</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160;<span class="preprocessor">#endif // __CUDACC__</span></div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160;</div><div class="line"><a name="l00085"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#afc69a418242c5b851993bc2307b1c897"> 85</a></span>&#160;<span class="preprocessor">#define CHECK_CUDA_ERROR(msg) \</span></div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160;<span class="preprocessor"> cudaError_t e = cudaGetLastError(); \</span></div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160;<span class="preprocessor"> CHECK_EQ(e, cudaSuccess) &lt;&lt; (msg) &lt;&lt; &quot; CUDA: &quot; &lt;&lt; cudaGetErrorString(e); \</span></div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160;</div><div class="line"><a name="l00097"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d"> 97</a></span>&#160;<span class="preprocessor">#define CUDA_CALL(func) \</span></div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160;<span class="preprocessor"> cudaError_t e = (func); \</span></div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160;<span class="preprocessor"> CHECK(e == cudaSuccess || e == cudaErrorCudartUnloading) \</span></div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160;<span class="preprocessor"> &lt;&lt; &quot;CUDA: &quot; &lt;&lt; cudaGetErrorString(e); \</span></div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160;</div><div class="line"><a name="l00110"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381"> 110</a></span>&#160;<span class="preprocessor">#define CUBLAS_CALL(func) \</span></div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160;<span class="preprocessor"> cublasStatus_t e = (func); \</span></div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160;<span class="preprocessor"> CHECK_EQ(e, CUBLAS_STATUS_SUCCESS) \</span></div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;<span class="preprocessor"> &lt;&lt; &quot;cuBLAS: &quot; &lt;&lt; mxnet::common::cuda::CublasGetErrorString(e); \</span></div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160;</div><div class="line"><a name="l00123"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ab38940ff6950f84102baa4573675b670"> 123</a></span>&#160;<span class="preprocessor">#define CUSOLVER_CALL(func) \</span></div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160;<span class="preprocessor"> cusolverStatus_t e = (func); \</span></div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160;<span class="preprocessor"> CHECK_EQ(e, CUSOLVER_STATUS_SUCCESS) \</span></div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160;<span class="preprocessor"> &lt;&lt; &quot;cuSolver: &quot; &lt;&lt; mxnet::common::cuda::CusolverGetErrorString(e); \</span></div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160;</div><div class="line"><a name="l00136"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a82d7233550780a8c186e79c24aed8406"> 136</a></span>&#160;<span class="preprocessor">#define CURAND_CALL(func) \</span></div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160;<span class="preprocessor"> curandStatus_t e = (func); \</span></div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160;<span class="preprocessor"> CHECK_EQ(e, CURAND_STATUS_SUCCESS) \</span></div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160;<span class="preprocessor"> &lt;&lt; &quot;cuRAND: &quot; &lt;&lt; mxnet::common::cuda::CurandGetErrorString(e); \</span></div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160;</div><div class="line"><a name="l00149"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a63b6d263b94df9e33474894ad02b792d"> 149</a></span>&#160;<span class="preprocessor">#define NVRTC_CALL(x) \</span></div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160;<span class="preprocessor"> nvrtcResult result = x; \</span></div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160;<span class="preprocessor"> CHECK_EQ(result, NVRTC_SUCCESS) \</span></div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160;<span class="preprocessor"> &lt;&lt; #x &quot; failed with error &quot; \</span></div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160;<span class="preprocessor"> &lt;&lt; nvrtcGetErrorString(result); \</span></div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160;</div><div class="line"><a name="l00163"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a0d9b08b9ef45122c54bf5a121aeab5c3"> 163</a></span>&#160;<span class="preprocessor">#define CUDA_DRIVER_CALL(func) \</span></div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160;<span class="preprocessor"> CUresult e = (func); \</span></div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160;<span class="preprocessor"> if (e != CUDA_SUCCESS) { \</span></div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160;<span class="preprocessor"> char const * err_msg = nullptr; \</span></div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160;<span class="preprocessor"> if (cuGetErrorString(e, &amp;err_msg) == CUDA_ERROR_INVALID_VALUE) { \</span></div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160;<span class="preprocessor"> LOG(FATAL) &lt;&lt; &quot;CUDA Driver: Unknown error &quot; &lt;&lt; e; \</span></div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160;<span class="preprocessor"> } else { \</span></div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160;<span class="preprocessor"> LOG(FATAL) &lt;&lt; &quot;CUDA Driver: &quot; &lt;&lt; err_msg; \</span></div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160;<span class="preprocessor"> } \</span></div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160;<span class="preprocessor"> } \</span></div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160;</div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160;</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160;<span class="preprocessor">#if !defined(_MSC_VER)</span></div><div class="line"><a name="l00178"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a685e3713856baaafb1d4edea43725c83"> 178</a></span>&#160;<span class="preprocessor">#define CUDA_UNROLL _Pragma(&quot;unroll&quot;)</span></div><div class="line"><a name="l00179"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#addb314f15d765a2ba72ae37dab23c03b"> 179</a></span>&#160;<span class="preprocessor">#define CUDA_NOUNROLL _Pragma(&quot;nounroll&quot;)</span></div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160;<span class="preprocessor">#else</span></div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160;<span class="preprocessor">#define CUDA_UNROLL</span></div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160;<span class="preprocessor">#define CUDA_NOUNROLL</span></div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160;</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacemxnet.html">mxnet</a> {</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160;<span class="keyword">namespace </span>common {</div><div class="line"><a name="l00188"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html"> 188</a></span>&#160;<span class="keyword">namespace </span>cuda {</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00193"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html"> 193</a></span>&#160;<span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>;</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160;</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160;<span class="comment">// With CUDA v8, cuBLAS adopted use of cudaDataType_t instead of its own</span></div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160;<span class="comment">// datatype cublasDataType_t. The older cudaDataType_t values could be</span></div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160;<span class="comment">// included below, but since this class was introduced to support the cuBLAS v8</span></div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160;<span class="comment">// call cublasGemmEx(), burdening the class with the legacy type values</span></div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160;<span class="comment">// was not needed.</span></div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160;</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00202"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html"> 202</a></span>&#160;<span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>&lt;float&gt; {</div><div class="line"><a name="l00203"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a36982d0c4b16568b641c6fcd0afec49b"> 203</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a404a5fd26328cf46170f6eb3424c9633">mshadow::kFloat32</a>;</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 8000</span></div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_32F;</div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00207"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a735caccec4d080a0fd7e1bf88a727955"> 207</a></span>&#160; <span class="keyword">typedef</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a735caccec4d080a0fd7e1bf88a727955">ScaleType</a>;</div><div class="line"><a name="l00208"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a437fb574fefbe87d2add1289074b194a"> 208</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a437fb574fefbe87d2add1289074b194a">one</a>;</div><div class="line"><a name="l00209"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a98a73866e9513d63627f935531456ca7"> 209</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a98a73866e9513d63627f935531456ca7">zero</a>;</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160;};</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00212"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html"> 212</a></span>&#160;<span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>&lt;double&gt; {</div><div class="line"><a name="l00213"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a479d4f2ff7b9186dfe4d81cd6ce36d4c"> 213</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1f5a1c62216cbd2200443d501924cf28">mshadow::kFloat64</a>;</div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 8000</span></div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_64F;</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00217"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a46da9bddaa921bd38ec1c90a975972fe"> 217</a></span>&#160; <span class="keyword">typedef</span> <span class="keywordtype">double</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a46da9bddaa921bd38ec1c90a975972fe">ScaleType</a>;</div><div class="line"><a name="l00218"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a17c4026782d6a86b7d11aae44b684969"> 218</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">double</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a17c4026782d6a86b7d11aae44b684969">one</a>;</div><div class="line"><a name="l00219"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#acb859823c71c7c2aeeb55de510dcb1b4"> 219</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">double</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#acb859823c71c7c2aeeb55de510dcb1b4">zero</a>;</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160;};</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00222"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html"> 222</a></span>&#160;<span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>&lt;<a class="code" href="namespacemshadow.html">mshadow</a>::half::half_t&gt; {</div><div class="line"><a name="l00223"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#aff8ea7d6270e903b93102223dd3541ba"> 223</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a37ab9e42757689b17620f5728296d5d4">mshadow::kFloat16</a>;</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 8000</span></div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_16F;</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00227"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#a707d99741473be6edc5f4c345690e9ee"> 227</a></span>&#160; <span class="keyword">typedef</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#a707d99741473be6edc5f4c345690e9ee">ScaleType</a>;</div><div class="line"><a name="l00228"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#acf8d06465837aa6ee31e125e6eeda87c"> 228</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> mshadow::half::half_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#acf8d06465837aa6ee31e125e6eeda87c">one</a>;</div><div class="line"><a name="l00229"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#ae8222ef1a6cba23c5f196393d74c45ce"> 229</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> mshadow::half::half_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#ae8222ef1a6cba23c5f196393d74c45ce">zero</a>;</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160;};</div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00232"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html"> 232</a></span>&#160;<span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>&lt;uint8_t&gt; {</div><div class="line"><a name="l00233"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#aaf4a22c7533da6a79bf1c06e0c937cc5"> 233</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1a39d2f8230da3cb53528904c8a5fff0">mshadow::kUint8</a>;</div><div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 8000</span></div><div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_8I;</div><div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00237"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3af01d0a12763530b9568e836c4655e0"> 237</a></span>&#160; <span class="keyword">typedef</span> uint8_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3af01d0a12763530b9568e836c4655e0">ScaleType</a>;</div><div class="line"><a name="l00238"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3fde539928c0e7b776dce38ffbf50e94"> 238</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> uint8_t one = 1;</div><div class="line"><a name="l00239"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a05d6f7ce44f65f2dee8d919005359ad8"> 239</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> uint8_t zero = 0;</div><div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160;};</div><div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160;<span class="keyword">template</span>&lt;&gt;</div><div class="line"><a name="l00242"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html"> 242</a></span>&#160;<span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>&lt;int32_t&gt; {</div><div class="line"><a name="l00243"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#ac12d3826bcfd3207cc9ccec15365630e"> 243</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a4fbb02e389c3126918b505cd01188368">mshadow::kInt32</a>;</div><div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 8000</span></div><div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_32I;</div><div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00247"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#a237f23f560dad8c0299c11a14f1dee23"> 247</a></span>&#160; <span class="keyword">typedef</span> int32_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#a237f23f560dad8c0299c11a14f1dee23">ScaleType</a>;</div><div class="line"><a name="l00248"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#aaed4ff3ebff77d570c87a02ef40c90c0"> 248</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> int32_t one = 1;</div><div class="line"><a name="l00249"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#afd9ad4f6ea376d89dfb5f9c77edf8eba"> 249</a></span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> int32_t zero = 0;</div><div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160;};</div><div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160;</div><div class="line"><a name="l00257"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a9feee613a4f16a954dd68e55345a72ac"> 257</a></span>&#160;<span class="keyword">inline</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a9feee613a4f16a954dd68e55345a72ac">CublasGetErrorString</a>(cublasStatus_t error) {</div><div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160; <span class="keywordflow">switch</span> (error) {</div><div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_SUCCESS:</div><div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_SUCCESS&quot;</span>;</div><div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_NOT_INITIALIZED:</div><div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_NOT_INITIALIZED&quot;</span>;</div><div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_ALLOC_FAILED:</div><div class="line"><a name="l00264"></a><span class="lineno"> 264</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_ALLOC_FAILED&quot;</span>;</div><div class="line"><a name="l00265"></a><span class="lineno"> 265</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_INVALID_VALUE:</div><div class="line"><a name="l00266"></a><span class="lineno"> 266</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_INVALID_VALUE&quot;</span>;</div><div class="line"><a name="l00267"></a><span class="lineno"> 267</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_ARCH_MISMATCH:</div><div class="line"><a name="l00268"></a><span class="lineno"> 268</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_ARCH_MISMATCH&quot;</span>;</div><div class="line"><a name="l00269"></a><span class="lineno"> 269</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_MAPPING_ERROR:</div><div class="line"><a name="l00270"></a><span class="lineno"> 270</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_MAPPING_ERROR&quot;</span>;</div><div class="line"><a name="l00271"></a><span class="lineno"> 271</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_EXECUTION_FAILED:</div><div class="line"><a name="l00272"></a><span class="lineno"> 272</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_EXECUTION_FAILED&quot;</span>;</div><div class="line"><a name="l00273"></a><span class="lineno"> 273</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_INTERNAL_ERROR:</div><div class="line"><a name="l00274"></a><span class="lineno"> 274</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_INTERNAL_ERROR&quot;</span>;</div><div class="line"><a name="l00275"></a><span class="lineno"> 275</span>&#160; <span class="keywordflow">case</span> CUBLAS_STATUS_NOT_SUPPORTED:</div><div class="line"><a name="l00276"></a><span class="lineno"> 276</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUBLAS_STATUS_NOT_SUPPORTED&quot;</span>;</div><div class="line"><a name="l00277"></a><span class="lineno"> 277</span>&#160; <span class="keywordflow">default</span>:</div><div class="line"><a name="l00278"></a><span class="lineno"> 278</span>&#160; <span class="keywordflow">break</span>;</div><div class="line"><a name="l00279"></a><span class="lineno"> 279</span>&#160; }</div><div class="line"><a name="l00280"></a><span class="lineno"> 280</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;Unknown cuBLAS status&quot;</span>;</div><div class="line"><a name="l00281"></a><span class="lineno"> 281</span>&#160;}</div><div class="line"><a name="l00282"></a><span class="lineno"> 282</span>&#160;</div><div class="line"><a name="l00283"></a><span class="lineno"> 283</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 8000</span></div><div class="line"><a name="l00284"></a><span class="lineno"> 284</span>&#160;</div><div class="line"><a name="l00289"></a><span class="lineno"> 289</span>&#160;<span class="keyword">inline</span> cublasOperation_t CublasTransposeOp(<span class="keywordtype">bool</span> <a class="code" href="namespacemshadow_1_1expr.html#afc62edfb800bb19e201b20b444831af3">transpose</a>) {</div><div class="line"><a name="l00290"></a><span class="lineno"> 290</span>&#160; <span class="keywordflow">return</span> transpose ? CUBLAS_OP_T : CUBLAS_OP_N;</div><div class="line"><a name="l00291"></a><span class="lineno"> 291</span>&#160;}</div><div class="line"><a name="l00292"></a><span class="lineno"> 292</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160;</div><div class="line"><a name="l00299"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#abf9bcb4cb696e9ae61b818510dac39c8"> 299</a></span>&#160;<span class="keyword">inline</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#abf9bcb4cb696e9ae61b818510dac39c8">CusolverGetErrorString</a>(cusolverStatus_t error) {</div><div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160; <span class="keywordflow">switch</span> (error) {</div><div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_SUCCESS:</div><div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_SUCCESS&quot;</span>;</div><div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_NOT_INITIALIZED:</div><div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_NOT_INITIALIZED&quot;</span>;</div><div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_ALLOC_FAILED:</div><div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_ALLOC_FAILED&quot;</span>;</div><div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_INVALID_VALUE:</div><div class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_INVALID_VALUE&quot;</span>;</div><div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_ARCH_MISMATCH:</div><div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_ARCH_MISMATCH&quot;</span>;</div><div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_EXECUTION_FAILED:</div><div class="line"><a name="l00312"></a><span class="lineno"> 312</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_EXECUTION_FAILED&quot;</span>;</div><div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_INTERNAL_ERROR:</div><div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_INTERNAL_ERROR&quot;</span>;</div><div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160; <span class="keywordflow">case</span> CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:</div><div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED&quot;</span>;</div><div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160; <span class="keywordflow">default</span>:</div><div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; <span class="keywordflow">break</span>;</div><div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160; }</div><div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;Unknown cuSOLVER status&quot;</span>;</div><div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160;}</div><div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160;</div><div class="line"><a name="l00328"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a97c06b2f4d26445a7386b0f54fae1feb"> 328</a></span>&#160;<span class="keyword">inline</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a97c06b2f4d26445a7386b0f54fae1feb">CurandGetErrorString</a>(curandStatus_t status) {</div><div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160; <span class="keywordflow">switch</span> (status) {</div><div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_SUCCESS:</div><div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_SUCCESS&quot;</span>;</div><div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_VERSION_MISMATCH:</div><div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_VERSION_MISMATCH&quot;</span>;</div><div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_NOT_INITIALIZED:</div><div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_NOT_INITIALIZED&quot;</span>;</div><div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_ALLOCATION_FAILED:</div><div class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_ALLOCATION_FAILED&quot;</span>;</div><div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_TYPE_ERROR:</div><div class="line"><a name="l00339"></a><span class="lineno"> 339</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_TYPE_ERROR&quot;</span>;</div><div class="line"><a name="l00340"></a><span class="lineno"> 340</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_OUT_OF_RANGE:</div><div class="line"><a name="l00341"></a><span class="lineno"> 341</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_OUT_OF_RANGE&quot;</span>;</div><div class="line"><a name="l00342"></a><span class="lineno"> 342</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_LENGTH_NOT_MULTIPLE:</div><div class="line"><a name="l00343"></a><span class="lineno"> 343</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_LENGTH_NOT_MULTIPLE&quot;</span>;</div><div class="line"><a name="l00344"></a><span class="lineno"> 344</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:</div><div class="line"><a name="l00345"></a><span class="lineno"> 345</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_DOUBLE_PRECISION_REQUIRED&quot;</span>;</div><div class="line"><a name="l00346"></a><span class="lineno"> 346</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_LAUNCH_FAILURE:</div><div class="line"><a name="l00347"></a><span class="lineno"> 347</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_LAUNCH_FAILURE&quot;</span>;</div><div class="line"><a name="l00348"></a><span class="lineno"> 348</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_PREEXISTING_FAILURE:</div><div class="line"><a name="l00349"></a><span class="lineno"> 349</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_PREEXISTING_FAILURE&quot;</span>;</div><div class="line"><a name="l00350"></a><span class="lineno"> 350</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_INITIALIZATION_FAILED:</div><div class="line"><a name="l00351"></a><span class="lineno"> 351</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_INITIALIZATION_FAILED&quot;</span>;</div><div class="line"><a name="l00352"></a><span class="lineno"> 352</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_ARCH_MISMATCH:</div><div class="line"><a name="l00353"></a><span class="lineno"> 353</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_ARCH_MISMATCH&quot;</span>;</div><div class="line"><a name="l00354"></a><span class="lineno"> 354</span>&#160; <span class="keywordflow">case</span> CURAND_STATUS_INTERNAL_ERROR:</div><div class="line"><a name="l00355"></a><span class="lineno"> 355</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;CURAND_STATUS_INTERNAL_ERROR&quot;</span>;</div><div class="line"><a name="l00356"></a><span class="lineno"> 356</span>&#160; }</div><div class="line"><a name="l00357"></a><span class="lineno"> 357</span>&#160; <span class="keywordflow">return</span> <span class="stringliteral">&quot;Unknown cuRAND status&quot;</span>;</div><div class="line"><a name="l00358"></a><span class="lineno"> 358</span>&#160;}</div><div class="line"><a name="l00359"></a><span class="lineno"> 359</span>&#160;</div><div class="line"><a name="l00360"></a><span class="lineno"> 360</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00361"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a6f3ee04eb382c57e10916108db3efd80"> 361</a></span>&#160;<span class="keyword">inline</span> DType __device__ <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a6f3ee04eb382c57e10916108db3efd80">CudaMax</a>(DType a, DType b) {</div><div class="line"><a name="l00362"></a><span class="lineno"> 362</span>&#160; <span class="keywordflow">return</span> a &gt; b ? a : b;</div><div class="line"><a name="l00363"></a><span class="lineno"> 363</span>&#160;}</div><div class="line"><a name="l00364"></a><span class="lineno"> 364</span>&#160;</div><div class="line"><a name="l00365"></a><span class="lineno"> 365</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00366"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a03888f252f813f6d052ae84bf8801498"> 366</a></span>&#160;<span class="keyword">inline</span> DType __device__ <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a03888f252f813f6d052ae84bf8801498">CudaMin</a>(DType a, DType b) {</div><div class="line"><a name="l00367"></a><span class="lineno"> 367</span>&#160; <span class="keywordflow">return</span> a &lt; b ? a : b;</div><div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160;}</div><div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160;</div><div class="line"><a name="l00370"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html"> 370</a></span>&#160;<span class="keyword">class </span><a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html">DeviceStore</a> {</div><div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160; <span class="keyword">public</span>:</div><div class="line"><a name="l00373"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#ad9878a09a93d4fcaf9d0639b3613d9f7"> 373</a></span>&#160; <span class="keyword">explicit</span> <a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#ad9878a09a93d4fcaf9d0639b3613d9f7">DeviceStore</a>(<span class="keywordtype">int</span> requested_device = -1, <span class="keywordtype">bool</span> restore = <span class="keyword">true</span>) :</div><div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160; restore_device_(-1),</div><div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160; current_device_(requested_device),</div><div class="line"><a name="l00376"></a><span class="lineno"> 376</span>&#160; restore_(restore) {</div><div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160; <span class="keywordflow">if</span> (restore_)</div><div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160; <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaGetDevice(&amp;restore_device_));</div><div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160; <span class="keywordflow">if</span> (requested_device != restore_device_) {</div><div class="line"><a name="l00380"></a><span class="lineno"> 380</span>&#160; <a class="code" href="namespacemshadow.html#abb4c36a0703ec671a5e74b0a8d37a47a">SetDevice</a>(requested_device);</div><div class="line"><a name="l00381"></a><span class="lineno"> 381</span>&#160; }</div><div class="line"><a name="l00382"></a><span class="lineno"> 382</span>&#160; }</div><div class="line"><a name="l00383"></a><span class="lineno"> 383</span>&#160;</div><div class="line"><a name="l00384"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a701d38ae493688ee2136995fe8611aa0"> 384</a></span>&#160; <a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a701d38ae493688ee2136995fe8611aa0">~DeviceStore</a>() {</div><div class="line"><a name="l00385"></a><span class="lineno"> 385</span>&#160; <span class="keywordflow">if</span> (restore_ &amp;&amp;</div><div class="line"><a name="l00386"></a><span class="lineno"> 386</span>&#160; current_device_ != restore_device_ &amp;&amp;</div><div class="line"><a name="l00387"></a><span class="lineno"> 387</span>&#160; current_device_ != -1 &amp;&amp;</div><div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160; restore_device_ != -1)</div><div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaSetDevice(restore_device_));</div><div class="line"><a name="l00390"></a><span class="lineno"> 390</span>&#160; }</div><div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160;</div><div class="line"><a name="l00392"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a01163fd4915e74bdd81dd7305917f0e4"> 392</a></span>&#160; <span class="keywordtype">void</span> <a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a01163fd4915e74bdd81dd7305917f0e4">SetDevice</a>(<span class="keywordtype">int</span> device) {</div><div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160; <span class="keywordflow">if</span> (device != -1) {</div><div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160; <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaSetDevice(device));</div><div class="line"><a name="l00395"></a><span class="lineno"> 395</span>&#160; current_device_ = device;</div><div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; }</div><div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; }</div><div class="line"><a name="l00398"></a><span class="lineno"> 398</span>&#160;</div><div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; <span class="keyword">private</span>:</div><div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; <span class="keywordtype">int</span> restore_device_;</div><div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160; <span class="keywordtype">int</span> current_device_;</div><div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160; <span class="keywordtype">bool</span> restore_;</div><div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160;};</div><div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160;</div><div class="line"><a name="l00413"></a><span class="lineno"> 413</span>&#160;<span class="keywordtype">int</span> <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#aa7e0a8f7264c65d8000560d84d7fc54d">get_load_type</a>(<span class="keywordtype">size_t</span> N);</div><div class="line"><a name="l00414"></a><span class="lineno"> 414</span>&#160;</div><div class="line"><a name="l00425"></a><span class="lineno"> 425</span>&#160;<span class="keywordtype">int</span> <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a7608f1c1700694e453f37cfadfe9e30e">get_rows_per_block</a>(<span class="keywordtype">size_t</span> row_size, <span class="keywordtype">int</span> num_threads_per_block);</div><div class="line"><a name="l00426"></a><span class="lineno"> 426</span>&#160;</div><div class="line"><a name="l00427"></a><span class="lineno"> 427</span>&#160;} <span class="comment">// namespace cuda</span></div><div class="line"><a name="l00428"></a><span class="lineno"> 428</span>&#160;} <span class="comment">// namespace common</span></div><div class="line"><a name="l00429"></a><span class="lineno"> 429</span>&#160;} <span class="comment">// namespace mxnet</span></div><div class="line"><a name="l00430"></a><span class="lineno"> 430</span>&#160;</div><div class="line"><a name="l00432"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa"> 432</a></span>&#160;constexpr <span class="keywordtype">size_t</span> <a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a> = 64;</div><div class="line"><a name="l00433"></a><span class="lineno"> 433</span>&#160;</div><div class="line"><a name="l00434"></a><span class="lineno"> 434</span>&#160;<span class="comment">// The implementations below assume that accesses of 32-bit ints are inherently atomic and</span></div><div class="line"><a name="l00435"></a><span class="lineno"> 435</span>&#160;<span class="comment">// can be read/written by multiple threads without locks. The values held should be &lt; 2^31.</span></div><div class="line"><a name="l00436"></a><span class="lineno"> 436</span>&#160;</div><div class="line"><a name="l00445"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e"> 445</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(<span class="keywordtype">int</span> device_id, std::vector&lt;int32_t&gt; *cached_values,</div><div class="line"><a name="l00446"></a><span class="lineno"> 446</span>&#160; cudaDeviceAttr attr, <span class="keyword">const</span> <span class="keywordtype">char</span> *attr_name) {</div><div class="line"><a name="l00447"></a><span class="lineno"> 447</span>&#160; <span class="keywordflow">if</span> (device_id &lt; 0 || device_id &gt;= static_cast&lt;int&gt;(cached_values-&gt;size())) {</div><div class="line"><a name="l00448"></a><span class="lineno"> 448</span>&#160; LOG(FATAL) &lt;&lt; attr_name &lt;&lt; <span class="stringliteral">&quot;(device_id) called with invalid id: &quot;</span> &lt;&lt; device_id;</div><div class="line"><a name="l00449"></a><span class="lineno"> 449</span>&#160; } <span class="keywordflow">else</span> <span class="keywordflow">if</span> ((*cached_values)[device_id] &lt; 0) {</div><div class="line"><a name="l00450"></a><span class="lineno"> 450</span>&#160; <span class="keywordtype">int</span> temp = -1;</div><div class="line"><a name="l00451"></a><span class="lineno"> 451</span>&#160; <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaDeviceGetAttribute(&amp;temp, attr, device_id));</div><div class="line"><a name="l00452"></a><span class="lineno"> 452</span>&#160; (*cached_values)[device_id] = <span class="keyword">static_cast&lt;</span>int32_t<span class="keyword">&gt;</span>(temp);</div><div class="line"><a name="l00453"></a><span class="lineno"> 453</span>&#160; }</div><div class="line"><a name="l00454"></a><span class="lineno"> 454</span>&#160; <span class="keywordflow">return</span> (*cached_values)[device_id];</div><div class="line"><a name="l00455"></a><span class="lineno"> 455</span>&#160;}</div><div class="line"><a name="l00456"></a><span class="lineno"> 456</span>&#160;</div><div class="line"><a name="l00462"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d"> 462</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00463"></a><span class="lineno"> 463</span>&#160; <span class="keyword">static</span> std::vector&lt;int32_t&gt; capability_major(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00464"></a><span class="lineno"> 464</span>&#160; <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &amp;capability_major,</div><div class="line"><a name="l00465"></a><span class="lineno"> 465</span>&#160; cudaDevAttrComputeCapabilityMajor, <span class="stringliteral">&quot;ComputeCapabilityMajor&quot;</span>);</div><div class="line"><a name="l00466"></a><span class="lineno"> 466</span>&#160;}</div><div class="line"><a name="l00467"></a><span class="lineno"> 467</span>&#160;</div><div class="line"><a name="l00473"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6"> 473</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00474"></a><span class="lineno"> 474</span>&#160; <span class="keyword">static</span> std::vector&lt;int32_t&gt; capability_minor(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00475"></a><span class="lineno"> 475</span>&#160; <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &amp;capability_minor,</div><div class="line"><a name="l00476"></a><span class="lineno"> 476</span>&#160; cudaDevAttrComputeCapabilityMinor, <span class="stringliteral">&quot;ComputeCapabilityMinor&quot;</span>);</div><div class="line"><a name="l00477"></a><span class="lineno"> 477</span>&#160;}</div><div class="line"><a name="l00478"></a><span class="lineno"> 478</span>&#160;</div><div class="line"><a name="l00484"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a9779e3ad0efd0faec7fbe431c0db896d"> 484</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#a9779e3ad0efd0faec7fbe431c0db896d">SMArch</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00485"></a><span class="lineno"> 485</span>&#160; <span class="keyword">auto</span> major = <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(device_id);</div><div class="line"><a name="l00486"></a><span class="lineno"> 486</span>&#160; <span class="keyword">auto</span> minor = <a class="code" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a>(device_id);</div><div class="line"><a name="l00487"></a><span class="lineno"> 487</span>&#160; <span class="keywordflow">return</span> 10 * major + minor;</div><div class="line"><a name="l00488"></a><span class="lineno"> 488</span>&#160;}</div><div class="line"><a name="l00489"></a><span class="lineno"> 489</span>&#160;</div><div class="line"><a name="l00495"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ac51c1cdc60e05dd857bfabca52355f2f"> 495</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#ac51c1cdc60e05dd857bfabca52355f2f">MultiprocessorCount</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00496"></a><span class="lineno"> 496</span>&#160; <span class="keyword">static</span> std::vector&lt;int32_t&gt; sm_counts(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00497"></a><span class="lineno"> 497</span>&#160; <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &amp;sm_counts,</div><div class="line"><a name="l00498"></a><span class="lineno"> 498</span>&#160; cudaDevAttrMultiProcessorCount, <span class="stringliteral">&quot;MultiprocessorCount&quot;</span>);</div><div class="line"><a name="l00499"></a><span class="lineno"> 499</span>&#160;}</div><div class="line"><a name="l00500"></a><span class="lineno"> 500</span>&#160;</div><div class="line"><a name="l00506"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#af5b41c04e3d281500957c305532cd478"> 506</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#af5b41c04e3d281500957c305532cd478">MaxSharedMemoryPerMultiprocessor</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00507"></a><span class="lineno"> 507</span>&#160; <span class="keyword">static</span> std::vector&lt;int32_t&gt; max_smem_per_mutiprocessor(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00508"></a><span class="lineno"> 508</span>&#160; <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &amp;max_smem_per_mutiprocessor,</div><div class="line"><a name="l00509"></a><span class="lineno"> 509</span>&#160; cudaDevAttrMaxSharedMemoryPerMultiprocessor,</div><div class="line"><a name="l00510"></a><span class="lineno"> 510</span>&#160; <span class="stringliteral">&quot;MaxSharedMemoryPerMultiprocessor&quot;</span>);</div><div class="line"><a name="l00511"></a><span class="lineno"> 511</span>&#160;}</div><div class="line"><a name="l00512"></a><span class="lineno"> 512</span>&#160;</div><div class="line"><a name="l00518"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a82a24f3db4d0c91374cb3fe7d413f603"> 518</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#a82a24f3db4d0c91374cb3fe7d413f603">SupportsCooperativeLaunch</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00519"></a><span class="lineno"> 519</span>&#160; <span class="keyword">static</span> std::vector&lt;int32_t&gt; coop_launch(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00520"></a><span class="lineno"> 520</span>&#160; <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &amp;coop_launch,</div><div class="line"><a name="l00521"></a><span class="lineno"> 521</span>&#160; cudaDevAttrCooperativeLaunch, <span class="stringliteral">&quot;SupportsCooperativeLaunch&quot;</span>);</div><div class="line"><a name="l00522"></a><span class="lineno"> 522</span>&#160;}</div><div class="line"><a name="l00523"></a><span class="lineno"> 523</span>&#160;</div><div class="line"><a name="l00530"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#afb4268417c1d8886a39142c85c8f188f"> 530</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#afb4268417c1d8886a39142c85c8f188f">SupportsFloat16Compute</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00531"></a><span class="lineno"> 531</span>&#160; <span class="keywordflow">if</span> (device_id &lt; 0) {</div><div class="line"><a name="l00532"></a><span class="lineno"> 532</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div><div class="line"><a name="l00533"></a><span class="lineno"> 533</span>&#160; } <span class="keywordflow">else</span> {</div><div class="line"><a name="l00534"></a><span class="lineno"> 534</span>&#160; <span class="comment">// Kepler and most Maxwell GPUs do not support fp16 compute</span></div><div class="line"><a name="l00535"></a><span class="lineno"> 535</span>&#160; <span class="keywordtype">int</span> computeCapabilityMajor = <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(device_id);</div><div class="line"><a name="l00536"></a><span class="lineno"> 536</span>&#160; <span class="keywordflow">return</span> (computeCapabilityMajor &gt; 5) ||</div><div class="line"><a name="l00537"></a><span class="lineno"> 537</span>&#160; (computeCapabilityMajor == 5 &amp;&amp; <a class="code" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a>(device_id) &gt;= 3);</div><div class="line"><a name="l00538"></a><span class="lineno"> 538</span>&#160; }</div><div class="line"><a name="l00539"></a><span class="lineno"> 539</span>&#160;}</div><div class="line"><a name="l00540"></a><span class="lineno"> 540</span>&#160;</div><div class="line"><a name="l00547"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#af7e22ce6d80d61e8ca37df23880ff1a9"> 547</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#af7e22ce6d80d61e8ca37df23880ff1a9">SupportsTensorCore</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00548"></a><span class="lineno"> 548</span>&#160; <span class="comment">// Volta (sm_70) supports TensorCore algos</span></div><div class="line"><a name="l00549"></a><span class="lineno"> 549</span>&#160; <span class="keywordflow">return</span> device_id &gt;= 0 &amp;&amp;</div><div class="line"><a name="l00550"></a><span class="lineno"> 550</span>&#160; <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(device_id) &gt;=7;</div><div class="line"><a name="l00551"></a><span class="lineno"> 551</span>&#160;}</div><div class="line"><a name="l00552"></a><span class="lineno"> 552</span>&#160;</div><div class="line"><a name="l00553"></a><span class="lineno"> 553</span>&#160;<span class="comment">// The policy if the user hasn&#39;t set the environment variable MXNET_CUDA_ALLOW_TENSOR_CORE</span></div><div class="line"><a name="l00554"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#aa7ba00b841d6b7ba443b0e58dac9ab88"> 554</a></span>&#160;<span class="preprocessor">#define MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT true</span></div><div class="line"><a name="l00555"></a><span class="lineno"> 555</span>&#160;</div><div class="line"><a name="l00560"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a464dee13053e3b0b1006c6307069196c"> 560</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#a464dee13053e3b0b1006c6307069196c">GetEnvAllowTensorCore</a>() {</div><div class="line"><a name="l00561"></a><span class="lineno"> 561</span>&#160; <span class="comment">// Since these statics are in the &#39;.h&#39; file, they will exist and will be set</span></div><div class="line"><a name="l00562"></a><span class="lineno"> 562</span>&#160; <span class="comment">// separately in each compilation unit. Not ideal, but cleaner than creating a</span></div><div class="line"><a name="l00563"></a><span class="lineno"> 563</span>&#160; <span class="comment">// cuda_utils.cc solely to have a single instance and initialization.</span></div><div class="line"><a name="l00564"></a><span class="lineno"> 564</span>&#160; <span class="keyword">static</span> <span class="keywordtype">bool</span> allow_tensor_core = <span class="keyword">false</span>;</div><div class="line"><a name="l00565"></a><span class="lineno"> 565</span>&#160; <span class="keyword">static</span> <span class="keywordtype">bool</span> is_set = <span class="keyword">false</span>;</div><div class="line"><a name="l00566"></a><span class="lineno"> 566</span>&#160; <span class="keywordflow">if</span> (!is_set) {</div><div class="line"><a name="l00567"></a><span class="lineno"> 567</span>&#160; <span class="comment">// Use of optional&lt;bool&gt; here permits: &quot;0&quot;, &quot;1&quot;, &quot;true&quot; and &quot;false&quot; to all be legal.</span></div><div class="line"><a name="l00568"></a><span class="lineno"> 568</span>&#160; <span class="keywordtype">bool</span> default_value = <a class="code" href="cuda__utils_8h.html#aa7ba00b841d6b7ba443b0e58dac9ab88">MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT</a>;</div><div class="line"><a name="l00569"></a><span class="lineno"> 569</span>&#160; allow_tensor_core = dmlc::GetEnv(<span class="stringliteral">&quot;MXNET_CUDA_ALLOW_TENSOR_CORE&quot;</span>,</div><div class="line"><a name="l00570"></a><span class="lineno"> 570</span>&#160; <a class="code" href="classdmlc_1_1optional.html">dmlc::optional&lt;bool&gt;</a>(default_value)).value();</div><div class="line"><a name="l00571"></a><span class="lineno"> 571</span>&#160; is_set = <span class="keyword">true</span>;</div><div class="line"><a name="l00572"></a><span class="lineno"> 572</span>&#160; }</div><div class="line"><a name="l00573"></a><span class="lineno"> 573</span>&#160; <span class="keywordflow">return</span> allow_tensor_core;</div><div class="line"><a name="l00574"></a><span class="lineno"> 574</span>&#160;}</div><div class="line"><a name="l00575"></a><span class="lineno"> 575</span>&#160;</div><div class="line"><a name="l00576"></a><span class="lineno"> 576</span>&#160;<span class="comment">// The policy if the user hasn&#39;t set the environment variable</span></div><div class="line"><a name="l00577"></a><span class="lineno"> 577</span>&#160;<span class="comment">// CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION</span></div><div class="line"><a name="l00578"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#aa16d34c218441b0d4074baa8c66a5521"> 578</a></span>&#160;<span class="preprocessor">#define MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT false</span></div><div class="line"><a name="l00579"></a><span class="lineno"> 579</span>&#160;</div><div class="line"><a name="l00583"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ad77e70546b7f35ecba0098caa2d07523"> 583</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#ad77e70546b7f35ecba0098caa2d07523">GetEnvAllowTensorCoreConversion</a>() {</div><div class="line"><a name="l00584"></a><span class="lineno"> 584</span>&#160; <span class="comment">// Use of optional&lt;bool&gt; here permits: &quot;0&quot;, &quot;1&quot;, &quot;true&quot; and &quot;false&quot; to all be</span></div><div class="line"><a name="l00585"></a><span class="lineno"> 585</span>&#160; <span class="comment">// legal.</span></div><div class="line"><a name="l00586"></a><span class="lineno"> 586</span>&#160; <span class="keywordtype">bool</span> default_value = <a class="code" href="cuda__utils_8h.html#aa16d34c218441b0d4074baa8c66a5521">MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT</a>;</div><div class="line"><a name="l00587"></a><span class="lineno"> 587</span>&#160; <span class="keywordflow">return</span> dmlc::GetEnv(<span class="stringliteral">&quot;MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION&quot;</span>,</div><div class="line"><a name="l00588"></a><span class="lineno"> 588</span>&#160; <a class="code" href="classdmlc_1_1optional.html">dmlc::optional&lt;bool&gt;</a>(default_value))</div><div class="line"><a name="l00589"></a><span class="lineno"> 589</span>&#160; .value();</div><div class="line"><a name="l00590"></a><span class="lineno"> 590</span>&#160;}</div><div class="line"><a name="l00591"></a><span class="lineno"> 591</span>&#160;</div><div class="line"><a name="l00592"></a><span class="lineno"> 592</span>&#160;<span class="preprocessor">#if CUDA_VERSION &gt;= 9000</span></div><div class="line"><a name="l00593"></a><span class="lineno"> 593</span>&#160;<span class="comment">// Sets the cuBLAS math mode that determines the &#39;allow TensorCore&#39; policy. Returns previous.</span></div><div class="line"><a name="l00594"></a><span class="lineno"> 594</span>&#160;<span class="keyword">inline</span> cublasMath_t SetCublasMathMode(cublasHandle_t blas_handle, cublasMath_t new_math_type) {</div><div class="line"><a name="l00595"></a><span class="lineno"> 595</span>&#160; <span class="keyword">auto</span> handle_math_mode = CUBLAS_DEFAULT_MATH;</div><div class="line"><a name="l00596"></a><span class="lineno"> 596</span>&#160; <a class="code" href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381">CUBLAS_CALL</a>(cublasGetMathMode(blas_handle, &amp;handle_math_mode));</div><div class="line"><a name="l00597"></a><span class="lineno"> 597</span>&#160; <a class="code" href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381">CUBLAS_CALL</a>(cublasSetMathMode(blas_handle, new_math_type));</div><div class="line"><a name="l00598"></a><span class="lineno"> 598</span>&#160; <span class="keywordflow">return</span> handle_math_mode;</div><div class="line"><a name="l00599"></a><span class="lineno"> 599</span>&#160;}</div><div class="line"><a name="l00600"></a><span class="lineno"> 600</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00601"></a><span class="lineno"> 601</span>&#160;</div><div class="line"><a name="l00602"></a><span class="lineno"> 602</span>&#160;<span class="preprocessor">#endif // MXNET_USE_CUDA</span></div><div class="line"><a name="l00603"></a><span class="lineno"> 603</span>&#160;</div><div class="line"><a name="l00604"></a><span class="lineno"> 604</span>&#160;<span class="preprocessor">#if MXNET_USE_CUDNN</span></div><div class="line"><a name="l00605"></a><span class="lineno"> 605</span>&#160;</div><div class="line"><a name="l00606"></a><span class="lineno"> 606</span>&#160;<span class="preprocessor">#include &lt;cudnn.h&gt;</span></div><div class="line"><a name="l00607"></a><span class="lineno"> 607</span>&#160;</div><div class="line"><a name="l00608"></a><span class="lineno"> 608</span>&#160;<span class="comment">// Creating CUDNN_VERSION_AS_STRING as follows avoids a static_assert error message that shows</span></div><div class="line"><a name="l00609"></a><span class="lineno"> 609</span>&#160;<span class="comment">// the formula for CUDNN_VERSION, i.e. &quot;1000 * 7 + 100 * 6 + 0&quot; rather than number &quot;7600&quot;.</span></div><div class="line"><a name="l00610"></a><span class="lineno"> 610</span>&#160;static_assert(CUDNN_PATCHLEVEL &lt; 100 &amp;&amp; CUDNN_MINOR &lt; 10,</div><div class="line"><a name="l00611"></a><span class="lineno"> 611</span>&#160; <span class="stringliteral">&quot;CUDNN_VERSION_AS_STRING macro assumptions violated.&quot;</span>);</div><div class="line"><a name="l00612"></a><span class="lineno"> 612</span>&#160;<span class="preprocessor">#if CUDNN_PATCHLEVEL &gt;= 10</span></div><div class="line"><a name="l00613"></a><span class="lineno"> 613</span>&#160;<span class="preprocessor">#define CUDNN_VERSION_AS_STRING QUOTEVALUE(CUDNN_MAJOR) \</span></div><div class="line"><a name="l00614"></a><span class="lineno"> 614</span>&#160;<span class="preprocessor"> QUOTEVALUE(CUDNN_MINOR) \</span></div><div class="line"><a name="l00615"></a><span class="lineno"> 615</span>&#160;<span class="preprocessor"> QUOTEVALUE(CUDNN_PATCHLEVEL)</span></div><div class="line"><a name="l00616"></a><span class="lineno"> 616</span>&#160;<span class="preprocessor">#else</span></div><div class="line"><a name="l00617"></a><span class="lineno"> 617</span>&#160;<span class="preprocessor">#define CUDNN_VERSION_AS_STRING QUOTEVALUE(CUDNN_MAJOR) \</span></div><div class="line"><a name="l00618"></a><span class="lineno"> 618</span>&#160;<span class="preprocessor"> QUOTEVALUE(CUDNN_MINOR) \</span></div><div class="line"><a name="l00619"></a><span class="lineno"> 619</span>&#160;<span class="preprocessor"> &quot;0&quot; QUOTEVALUE(CUDNN_PATCHLEVEL)</span></div><div class="line"><a name="l00620"></a><span class="lineno"> 620</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00621"></a><span class="lineno"> 621</span>&#160;</div><div class="line"><a name="l00622"></a><span class="lineno"> 622</span>&#160;<span class="preprocessor">#define STATIC_ASSERT_CUDNN_VERSION_GE(min_version) \</span></div><div class="line"><a name="l00623"></a><span class="lineno"> 623</span>&#160;<span class="preprocessor"> static_assert(CUDNN_VERSION &gt;= min_version, &quot;Compiled-against cuDNN version &quot; \</span></div><div class="line"><a name="l00624"></a><span class="lineno"> 624</span>&#160;<span class="preprocessor"> CUDNN_VERSION_AS_STRING &quot; is too old, please upgrade system to version &quot; \</span></div><div class="line"><a name="l00625"></a><span class="lineno"> 625</span>&#160;<span class="preprocessor"> QUOTEVALUE(min_version) &quot; or later.&quot;)</span></div><div class="line"><a name="l00626"></a><span class="lineno"> 626</span>&#160;</div><div class="line"><a name="l00627"></a><span class="lineno"> 627</span>&#160;<span class="preprocessor">#define CUDNN_CALL(func) \</span></div><div class="line"><a name="l00628"></a><span class="lineno"> 628</span>&#160;<span class="preprocessor"> { \</span></div><div class="line"><a name="l00629"></a><span class="lineno"> 629</span>&#160;<span class="preprocessor"> cudnnStatus_t e = (func); \</span></div><div class="line"><a name="l00630"></a><span class="lineno"> 630</span>&#160;<span class="preprocessor"> CHECK_EQ(e, CUDNN_STATUS_SUCCESS) &lt;&lt; &quot;cuDNN: &quot; &lt;&lt; cudnnGetErrorString(e); \</span></div><div class="line"><a name="l00631"></a><span class="lineno"> 631</span>&#160;<span class="preprocessor"> }</span></div><div class="line"><a name="l00632"></a><span class="lineno"> 632</span>&#160;</div><div class="line"><a name="l00640"></a><span class="lineno"> 640</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> MaxForwardAlgos(cudnnHandle_t cudnn_handle) {</div><div class="line"><a name="l00641"></a><span class="lineno"> 641</span>&#160; STATIC_ASSERT_CUDNN_VERSION_GE(7000);</div><div class="line"><a name="l00642"></a><span class="lineno"> 642</span>&#160; <span class="keywordtype">int</span> max_algos = 0;</div><div class="line"><a name="l00643"></a><span class="lineno"> 643</span>&#160; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithmMaxCount(cudnn_handle, &amp;max_algos));</div><div class="line"><a name="l00644"></a><span class="lineno"> 644</span>&#160; <span class="keywordflow">return</span> max_algos;</div><div class="line"><a name="l00645"></a><span class="lineno"> 645</span>&#160;}</div><div class="line"><a name="l00646"></a><span class="lineno"> 646</span>&#160;</div><div class="line"><a name="l00654"></a><span class="lineno"> 654</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> MaxBackwardFilterAlgos(cudnnHandle_t cudnn_handle) {</div><div class="line"><a name="l00655"></a><span class="lineno"> 655</span>&#160; STATIC_ASSERT_CUDNN_VERSION_GE(7000);</div><div class="line"><a name="l00656"></a><span class="lineno"> 656</span>&#160; <span class="keywordtype">int</span> max_algos = 0;</div><div class="line"><a name="l00657"></a><span class="lineno"> 657</span>&#160; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnn_handle, &amp;max_algos));</div><div class="line"><a name="l00658"></a><span class="lineno"> 658</span>&#160; <span class="keywordflow">return</span> max_algos;</div><div class="line"><a name="l00659"></a><span class="lineno"> 659</span>&#160;}</div><div class="line"><a name="l00660"></a><span class="lineno"> 660</span>&#160;</div><div class="line"><a name="l00668"></a><span class="lineno"> 668</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">int</span> MaxBackwardDataAlgos(cudnnHandle_t cudnn_handle) {</div><div class="line"><a name="l00669"></a><span class="lineno"> 669</span>&#160; STATIC_ASSERT_CUDNN_VERSION_GE(7000);</div><div class="line"><a name="l00670"></a><span class="lineno"> 670</span>&#160; <span class="keywordtype">int</span> max_algos = 0;</div><div class="line"><a name="l00671"></a><span class="lineno"> 671</span>&#160; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnn_handle, &amp;max_algos));</div><div class="line"><a name="l00672"></a><span class="lineno"> 672</span>&#160; <span class="keywordflow">return</span> max_algos;</div><div class="line"><a name="l00673"></a><span class="lineno"> 673</span>&#160;}</div><div class="line"><a name="l00674"></a><span class="lineno"> 674</span>&#160;</div><div class="line"><a name="l00675"></a><span class="lineno"> 675</span>&#160;<span class="preprocessor">#endif // MXNET_USE_CUDNN</span></div><div class="line"><a name="l00676"></a><span class="lineno"> 676</span>&#160;</div><div class="line"><a name="l00677"></a><span class="lineno"> 677</span>&#160;<span class="comment">// Overload atomicAdd to work for floats on all architectures</span></div><div class="line"><a name="l00678"></a><span class="lineno"> 678</span>&#160;<span class="preprocessor">#if defined(__CUDA_ARCH__) &amp;&amp; __CUDA_ARCH__ &lt; 600</span></div><div class="line"><a name="l00679"></a><span class="lineno"> 679</span>&#160;<span class="comment">// From CUDA Programming Guide</span></div><div class="line"><a name="l00680"></a><span class="lineno"> 680</span>&#160;<span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(<span class="keywordtype">double</span> *address, <span class="keywordtype">double</span> val) {</div><div class="line"><a name="l00681"></a><span class="lineno"> 681</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span>* address_as_ull = <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00682"></a><span class="lineno"> 682</span>&#160; <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span>*<span class="keyword">&gt;</span>(address); <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00683"></a><span class="lineno"> 683</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> old = *address_as_ull; <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00684"></a><span class="lineno"> 684</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> assumed; <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00685"></a><span class="lineno"> 685</span>&#160;</div><div class="line"><a name="l00686"></a><span class="lineno"> 686</span>&#160; <span class="keywordflow">do</span> {</div><div class="line"><a name="l00687"></a><span class="lineno"> 687</span>&#160; assumed = old;</div><div class="line"><a name="l00688"></a><span class="lineno"> 688</span>&#160; old = atomicCAS(address_as_ull, assumed,</div><div class="line"><a name="l00689"></a><span class="lineno"> 689</span>&#160; __double_as_longlong(val +</div><div class="line"><a name="l00690"></a><span class="lineno"> 690</span>&#160; __longlong_as_double(assumed)));</div><div class="line"><a name="l00691"></a><span class="lineno"> 691</span>&#160;</div><div class="line"><a name="l00692"></a><span class="lineno"> 692</span>&#160; <span class="comment">// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)</span></div><div class="line"><a name="l00693"></a><span class="lineno"> 693</span>&#160; } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00694"></a><span class="lineno"> 694</span>&#160;}</div><div class="line"><a name="l00695"></a><span class="lineno"> 695</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00696"></a><span class="lineno"> 696</span>&#160;</div><div class="line"><a name="l00697"></a><span class="lineno"> 697</span>&#160;<span class="comment">// Overload atomicAdd for half precision</span></div><div class="line"><a name="l00698"></a><span class="lineno"> 698</span>&#160;<span class="comment">// Taken from:</span></div><div class="line"><a name="l00699"></a><span class="lineno"> 699</span>&#160;<span class="comment">// https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh</span></div><div class="line"><a name="l00700"></a><span class="lineno"> 700</span>&#160;<span class="preprocessor">#ifdef __CUDACC__</span></div><div class="line"><a name="l00701"></a><span class="lineno"> 701</span>&#160;<span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(mshadow::half::half_t *address,</div><div class="line"><a name="l00702"></a><span class="lineno"> 702</span>&#160; mshadow::half::half_t val) {</div><div class="line"><a name="l00703"></a><span class="lineno"> 703</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *address_as_ui =</div><div class="line"><a name="l00704"></a><span class="lineno"> 704</span>&#160; <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *<span class="keyword">&gt;</span>(<span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">char</span> *<span class="keyword">&gt;</span>(address) -</div><div class="line"><a name="l00705"></a><span class="lineno"> 705</span>&#160; (reinterpret_cast&lt;size_t&gt;(address) &amp; 2));</div><div class="line"><a name="l00706"></a><span class="lineno"> 706</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> old = *address_as_ui;</div><div class="line"><a name="l00707"></a><span class="lineno"> 707</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> assumed;</div><div class="line"><a name="l00708"></a><span class="lineno"> 708</span>&#160;</div><div class="line"><a name="l00709"></a><span class="lineno"> 709</span>&#160; <span class="keywordflow">do</span> {</div><div class="line"><a name="l00710"></a><span class="lineno"> 710</span>&#160; assumed = old;</div><div class="line"><a name="l00711"></a><span class="lineno"> 711</span>&#160; mshadow::half::half_t hsum;</div><div class="line"><a name="l00712"></a><span class="lineno"> 712</span>&#160; hsum.half_ =</div><div class="line"><a name="l00713"></a><span class="lineno"> 713</span>&#160; <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">size_t</span><span class="keyword">&gt;</span>(address) &amp; 2 ? (old &gt;&gt; 16) : (old &amp; 0xffff);</div><div class="line"><a name="l00714"></a><span class="lineno"> 714</span>&#160; hsum += val;</div><div class="line"><a name="l00715"></a><span class="lineno"> 715</span>&#160; old = <span class="keyword">reinterpret_cast&lt;</span><span class="keywordtype">size_t</span><span class="keyword">&gt;</span>(address) &amp; 2</div><div class="line"><a name="l00716"></a><span class="lineno"> 716</span>&#160; ? (old &amp; 0xffff) | (hsum.half_ &lt;&lt; 16)</div><div class="line"><a name="l00717"></a><span class="lineno"> 717</span>&#160; : (old &amp; 0xffff0000) | hsum.half_;</div><div class="line"><a name="l00718"></a><span class="lineno"> 718</span>&#160; old = atomicCAS(address_as_ui, assumed, old);</div><div class="line"><a name="l00719"></a><span class="lineno"> 719</span>&#160; } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00720"></a><span class="lineno"> 720</span>&#160;}</div><div class="line"><a name="l00721"></a><span class="lineno"> 721</span>&#160;</div><div class="line"><a name="l00722"></a><span class="lineno"> 722</span>&#160;<span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(uint8_t *address, uint8_t val) {</div><div class="line"><a name="l00723"></a><span class="lineno"> 723</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> * address_as_ui = (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *) (address - ((<span class="keywordtype">size_t</span>)address &amp; 0x3));</div><div class="line"><a name="l00724"></a><span class="lineno"> 724</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> old = *address_as_ui;</div><div class="line"><a name="l00725"></a><span class="lineno"> 725</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> shift = (((size_t)address &amp; 0x3) &lt;&lt; 3);</div><div class="line"><a name="l00726"></a><span class="lineno"> 726</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> sum;</div><div class="line"><a name="l00727"></a><span class="lineno"> 727</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> assumed;</div><div class="line"><a name="l00728"></a><span class="lineno"> 728</span>&#160;</div><div class="line"><a name="l00729"></a><span class="lineno"> 729</span>&#160; <span class="keywordflow">do</span> {</div><div class="line"><a name="l00730"></a><span class="lineno"> 730</span>&#160; assumed = old;</div><div class="line"><a name="l00731"></a><span class="lineno"> 731</span>&#160; sum = val + <span class="keyword">static_cast&lt;</span>uint8_t<span class="keyword">&gt;</span>((old &gt;&gt; shift) &amp; 0xff);</div><div class="line"><a name="l00732"></a><span class="lineno"> 732</span>&#160; old = (old &amp; ~(0x000000ff &lt;&lt; shift)) | (sum &lt;&lt; shift);</div><div class="line"><a name="l00733"></a><span class="lineno"> 733</span>&#160; old = atomicCAS(address_as_ui, assumed, old);</div><div class="line"><a name="l00734"></a><span class="lineno"> 734</span>&#160; } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00735"></a><span class="lineno"> 735</span>&#160;}</div><div class="line"><a name="l00736"></a><span class="lineno"> 736</span>&#160;</div><div class="line"><a name="l00737"></a><span class="lineno"> 737</span>&#160;<span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(int8_t *address, int8_t val) {</div><div class="line"><a name="l00738"></a><span class="lineno"> 738</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> * address_as_ui = (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *) (address - ((<span class="keywordtype">size_t</span>)address &amp; 0x3));</div><div class="line"><a name="l00739"></a><span class="lineno"> 739</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> old = *address_as_ui;</div><div class="line"><a name="l00740"></a><span class="lineno"> 740</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> shift = (((size_t)address &amp; 0x3) &lt;&lt; 3);</div><div class="line"><a name="l00741"></a><span class="lineno"> 741</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> sum;</div><div class="line"><a name="l00742"></a><span class="lineno"> 742</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> assumed;</div><div class="line"><a name="l00743"></a><span class="lineno"> 743</span>&#160;</div><div class="line"><a name="l00744"></a><span class="lineno"> 744</span>&#160; <span class="keywordflow">do</span> {</div><div class="line"><a name="l00745"></a><span class="lineno"> 745</span>&#160; assumed = old;</div><div class="line"><a name="l00746"></a><span class="lineno"> 746</span>&#160; sum = val + <span class="keyword">static_cast&lt;</span>int8_t<span class="keyword">&gt;</span>((old &gt;&gt; shift) &amp; 0xff);</div><div class="line"><a name="l00747"></a><span class="lineno"> 747</span>&#160; old = (old &amp; ~(0x000000ff &lt;&lt; shift)) | (sum &lt;&lt; shift);</div><div class="line"><a name="l00748"></a><span class="lineno"> 748</span>&#160; old = atomicCAS(address_as_ui, assumed, old);</div><div class="line"><a name="l00749"></a><span class="lineno"> 749</span>&#160; } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00750"></a><span class="lineno"> 750</span>&#160;}</div><div class="line"><a name="l00751"></a><span class="lineno"> 751</span>&#160;</div><div class="line"><a name="l00752"></a><span class="lineno"> 752</span>&#160;<span class="comment">// Overload atomicAdd to work for signed int64 on all architectures</span></div><div class="line"><a name="l00753"></a><span class="lineno"> 753</span>&#160;<span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(int64_t *address, int64_t val) {</div><div class="line"><a name="l00754"></a><span class="lineno"> 754</span>&#160; atomicAdd(reinterpret_cast&lt;unsigned long long*&gt;(address), static_cast&lt;unsigned long long&gt;(val)); <span class="comment">// NOLINT</span></div><div class="line"><a name="l00755"></a><span class="lineno"> 755</span>&#160;}</div><div class="line"><a name="l00756"></a><span class="lineno"> 756</span>&#160;</div><div class="line"><a name="l00757"></a><span class="lineno"> 757</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> DType&gt;</div><div class="line"><a name="l00758"></a><span class="lineno"> 758</span>&#160;__device__ <span class="keyword">inline</span> DType ldg(<span class="keyword">const</span> DType* address) {</div><div class="line"><a name="l00759"></a><span class="lineno"> 759</span>&#160;<span class="preprocessor">#if __CUDA_ARCH__ &gt;= 350</span></div><div class="line"><a name="l00760"></a><span class="lineno"> 760</span>&#160; <span class="keywordflow">return</span> __ldg(address);</div><div class="line"><a name="l00761"></a><span class="lineno"> 761</span>&#160;<span class="preprocessor">#else</span></div><div class="line"><a name="l00762"></a><span class="lineno"> 762</span>&#160; <span class="keywordflow">return</span> *address;</div><div class="line"><a name="l00763"></a><span class="lineno"> 763</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00764"></a><span class="lineno"> 764</span>&#160;}</div><div class="line"><a name="l00765"></a><span class="lineno"> 765</span>&#160;</div><div class="line"><a name="l00766"></a><span class="lineno"> 766</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> OP, <span class="keyword">typename</span> T&gt;</div><div class="line"><a name="l00767"></a><span class="lineno"> 767</span>&#160;__device__ <span class="keyword">inline</span> T warp_reduce(T value, OP redfun) {</div><div class="line"><a name="l00768"></a><span class="lineno"> 768</span>&#160; value = redfun(value, __shfl_down_sync(0xffffffff, value, 16));</div><div class="line"><a name="l00769"></a><span class="lineno"> 769</span>&#160; value = redfun(value, __shfl_down_sync(0xffffffff, value, 8));</div><div class="line"><a name="l00770"></a><span class="lineno"> 770</span>&#160; value = redfun(value, __shfl_down_sync(0xffffffff, value, 4));</div><div class="line"><a name="l00771"></a><span class="lineno"> 771</span>&#160; value = redfun(value, __shfl_down_sync(0xffffffff, value, 2));</div><div class="line"><a name="l00772"></a><span class="lineno"> 772</span>&#160; value = redfun(value, __shfl_down_sync(0xffffffff, value, 1));</div><div class="line"><a name="l00773"></a><span class="lineno"> 773</span>&#160; <span class="keywordflow">return</span> value;</div><div class="line"><a name="l00774"></a><span class="lineno"> 774</span>&#160;}</div><div class="line"><a name="l00775"></a><span class="lineno"> 775</span>&#160;</div><div class="line"><a name="l00776"></a><span class="lineno"> 776</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> OP&gt;</div><div class="line"><a name="l00777"></a><span class="lineno"> 777</span>&#160;__device__ <span class="keyword">inline</span> mshadow::half::half_t warp_reduce(mshadow::half::half_t value, OP redfun) {</div><div class="line"><a name="l00778"></a><span class="lineno"> 778</span>&#160; <span class="keywordtype">float</span> v = <span class="keyword">static_cast&lt;</span><span class="keywordtype">float</span><span class="keyword">&gt;</span>(value);</div><div class="line"><a name="l00779"></a><span class="lineno"> 779</span>&#160; v = redfun(v, __shfl_down_sync(0xffffffff, v, 16));</div><div class="line"><a name="l00780"></a><span class="lineno"> 780</span>&#160; v = redfun(v, __shfl_down_sync(0xffffffff, v, 8));</div><div class="line"><a name="l00781"></a><span class="lineno"> 781</span>&#160; v = redfun(v, __shfl_down_sync(0xffffffff, v, 4));</div><div class="line"><a name="l00782"></a><span class="lineno"> 782</span>&#160; v = redfun(v, __shfl_down_sync(0xffffffff, v, 2));</div><div class="line"><a name="l00783"></a><span class="lineno"> 783</span>&#160; v = redfun(v, __shfl_down_sync(0xffffffff, v, 1));</div><div class="line"><a name="l00784"></a><span class="lineno"> 784</span>&#160; <span class="keywordflow">return</span> mshadow::half::half_t(v);</div><div class="line"><a name="l00785"></a><span class="lineno"> 785</span>&#160;}</div><div class="line"><a name="l00786"></a><span class="lineno"> 786</span>&#160;</div><div class="line"><a name="l00787"></a><span class="lineno"> 787</span>&#160;<span class="preprocessor">#endif // __CUDACC__</span></div><div class="line"><a name="l00788"></a><span class="lineno"> 788</span>&#160;</div><div class="line"><a name="l00789"></a><span class="lineno"> 789</span>&#160;<span class="preprocessor">#endif // MXNET_COMMON_CUDA_UTILS_H_</span></div><div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a404a5fd26328cf46170f6eb3424c9633"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a404a5fd26328cf46170f6eb3424c9633">mshadow::kFloat32</a></div><div class="ttdef"><b>Definition:</b> base.h:359</div></div>
<div class="ttc" id="cuda__utils_8h_html_a685d7ca3c9370ff471665abcacdeb381"><div class="ttname"><a href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381">CUBLAS_CALL</a></div><div class="ttdeci">#define CUBLAS_CALL(func)</div><div class="ttdoc">Protected cuBLAS call. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:110</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4_html_acb859823c71c7c2aeeb55de510dcb1b4"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#acb859823c71c7c2aeeb55de510dcb1b4">mxnet::common::cuda::CublasType&lt; double &gt;::zero</a></div><div class="ttdeci">static const double zero</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:219</div></div>
<div class="ttc" id="optional_8h_html"><div class="ttname"><a href="optional_8h.html">optional.h</a></div><div class="ttdoc">Container to hold optional data. </div></div>
<div class="ttc" id="cuda__utils_8h_html_aa79f548df23452162de37663f171e99d"><div class="ttname"><a href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a></div><div class="ttdeci">int ComputeCapabilityMajor(int device_id)</div><div class="ttdoc">Determine major version number of the gpu&amp;#39;s cuda compute architecture. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:462</div></div>
<div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html">mxnet::common::cuda::DeviceStore</a></div><div class="ttdef"><b>Definition:</b> cuda_utils.h:370</div></div>
<div class="ttc" id="classdmlc_1_1optional_html"><div class="ttname"><a href="classdmlc_1_1optional.html">dmlc::optional</a></div><div class="ttdoc">c++17 compatible optional class. </div><div class="ttdef"><b>Definition:</b> optional.h:43</div></div>
<div class="ttc" id="cuda__utils_8h_html_ad77e70546b7f35ecba0098caa2d07523"><div class="ttname"><a href="cuda__utils_8h.html#ad77e70546b7f35ecba0098caa2d07523">GetEnvAllowTensorCoreConversion</a></div><div class="ttdeci">bool GetEnvAllowTensorCoreConversion()</div><div class="ttdoc">Returns global policy for TensorCore implicit type casting. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:583</div></div>
<div class="ttc" id="namespacemxnet_html"><div class="ttname"><a href="namespacemxnet.html">mxnet</a></div><div class="ttdoc">namespace of mxnet </div><div class="ttdef"><b>Definition:</b> api_registry.h:33</div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_aa7e0a8f7264c65d8000560d84d7fc54d"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#aa7e0a8f7264c65d8000560d84d7fc54d">mxnet::common::cuda::get_load_type</a></div><div class="ttdeci">int get_load_type(size_t N)</div><div class="ttdoc">Get the largest datatype suitable to read requested number of bytes. </div></div>
<div class="ttc" id="cuda__utils_8h_html_a464dee13053e3b0b1006c6307069196c"><div class="ttname"><a href="cuda__utils_8h.html#a464dee13053e3b0b1006c6307069196c">GetEnvAllowTensorCore</a></div><div class="ttdeci">bool GetEnvAllowTensorCore()</div><div class="ttdoc">Returns global policy for TensorCore algo use. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:560</div></div>
<div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html_ad9878a09a93d4fcaf9d0639b3613d9f7"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#ad9878a09a93d4fcaf9d0639b3613d9f7">mxnet::common::cuda::DeviceStore::DeviceStore</a></div><div class="ttdeci">DeviceStore(int requested_device=-1, bool restore=true)</div><div class="ttdoc">default constructor- only optionally restores previous device </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:373</div></div>
<div class="ttc" id="cuda__utils_8h_html_a9779e3ad0efd0faec7fbe431c0db896d"><div class="ttname"><a href="cuda__utils_8h.html#a9779e3ad0efd0faec7fbe431c0db896d">SMArch</a></div><div class="ttdeci">int SMArch(int device_id)</div><div class="ttdoc">Return the integer SM architecture (e.g. Volta = 70). </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:484</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4_html_a237f23f560dad8c0299c11a14f1dee23"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#a237f23f560dad8c0299c11a14f1dee23">mxnet::common::cuda::CublasType&lt; int32_t &gt;::ScaleType</a></div><div class="ttdeci">int32_t ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:247</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4_html_a98a73866e9513d63627f935531456ca7"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a98a73866e9513d63627f935531456ca7">mxnet::common::cuda::CublasType&lt; float &gt;::zero</a></div><div class="ttdeci">static const float zero</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:209</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4_html_a437fb574fefbe87d2add1289074b194a"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a437fb574fefbe87d2add1289074b194a">mxnet::common::cuda::CublasType&lt; float &gt;::one</a></div><div class="ttdeci">static const float one</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:208</div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a03888f252f813f6d052ae84bf8801498"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a03888f252f813f6d052ae84bf8801498">mxnet::common::cuda::CudaMin</a></div><div class="ttdeci">DType __device__ CudaMin(DType a, DType b)</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:366</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4_html_a17c4026782d6a86b7d11aae44b684969"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a17c4026782d6a86b7d11aae44b684969">mxnet::common::cuda::CublasType&lt; double &gt;::one</a></div><div class="ttdeci">static const double one</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:218</div></div>
<div class="ttc" id="cuda__utils_8h_html_ac51c1cdc60e05dd857bfabca52355f2f"><div class="ttname"><a href="cuda__utils_8h.html#ac51c1cdc60e05dd857bfabca52355f2f">MultiprocessorCount</a></div><div class="ttdeci">int MultiprocessorCount(int device_id)</div><div class="ttdoc">Return the number of streaming multiprocessors of GPU device_id. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:495</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4_html_ae8222ef1a6cba23c5f196393d74c45ce"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#ae8222ef1a6cba23c5f196393d74c45ce">mxnet::common::cuda::CublasType&lt; mshadow::half::half_t &gt;::zero</a></div><div class="ttdeci">static const mshadow::half::half_t zero</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:229</div></div>
<div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html_a01163fd4915e74bdd81dd7305917f0e4"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a01163fd4915e74bdd81dd7305917f0e4">mxnet::common::cuda::DeviceStore::SetDevice</a></div><div class="ttdeci">void SetDevice(int device)</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:392</div></div>
<div class="ttc" id="cuda__utils_8h_html_afb4268417c1d8886a39142c85c8f188f"><div class="ttname"><a href="cuda__utils_8h.html#afb4268417c1d8886a39142c85c8f188f">SupportsFloat16Compute</a></div><div class="ttdeci">bool SupportsFloat16Compute(int device_id)</div><div class="ttdoc">Determine whether a cuda-capable gpu&amp;#39;s architecture supports float16 math. Assume not if device_id is...</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:530</div></div>
<div class="ttc" id="cuda__utils_8h_html_a7d0d1e932a096c498381cec82a650cfa"><div class="ttname"><a href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a></div><div class="ttdeci">constexpr size_t kMaxNumGpus</div><div class="ttdoc">Maximum number of GPUs. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:432</div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a6f3ee04eb382c57e10916108db3efd80"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a6f3ee04eb382c57e10916108db3efd80">mxnet::common::cuda::CudaMax</a></div><div class="ttdeci">DType __device__ CudaMax(DType a, DType b)</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:361</div></div>
<div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a1f5a1c62216cbd2200443d501924cf28"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1f5a1c62216cbd2200443d501924cf28">mshadow::kFloat64</a></div><div class="ttdef"><b>Definition:</b> base.h:360</div></div>
<div class="ttc" id="namespacemshadow_html_abb4c36a0703ec671a5e74b0a8d37a47a"><div class="ttname"><a href="namespacemshadow.html#abb4c36a0703ec671a5e74b0a8d37a47a">mshadow::SetDevice</a></div><div class="ttdeci">void SetDevice(int devid)</div><div class="ttdoc">set the device of current thread to work on </div></div>
<div class="ttc" id="cuda__utils_8h_html_aa16d34c218441b0d4074baa8c66a5521"><div class="ttname"><a href="cuda__utils_8h.html#aa16d34c218441b0d4074baa8c66a5521">MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT</a></div><div class="ttdeci">#define MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:578</div></div>
<div class="ttc" id="cuda__utils_8h_html_a31f4237a3ff5be2d420461a9baaffd1e"><div class="ttname"><a href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a></div><div class="ttdeci">int cudaAttributeLookup(int device_id, std::vector&lt; int32_t &gt; *cached_values, cudaDeviceAttr attr, const char *attr_name)</div><div class="ttdoc">Return an attribute GPU device_id. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:445</div></div>
<div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a4fbb02e389c3126918b505cd01188368"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a4fbb02e389c3126918b505cd01188368">mshadow::kInt32</a></div><div class="ttdef"><b>Definition:</b> base.h:363</div></div>
<div class="ttc" id="cuda__utils_8h_html_af7e22ce6d80d61e8ca37df23880ff1a9"><div class="ttname"><a href="cuda__utils_8h.html#af7e22ce6d80d61e8ca37df23880ff1a9">SupportsTensorCore</a></div><div class="ttdeci">bool SupportsTensorCore(int device_id)</div><div class="ttdoc">Determine whether a cuda-capable gpu&amp;#39;s architecture supports Tensor Core math. Assume not if device_i...</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:547</div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_abf9bcb4cb696e9ae61b818510dac39c8"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#abf9bcb4cb696e9ae61b818510dac39c8">mxnet::common::cuda::CusolverGetErrorString</a></div><div class="ttdeci">const char * CusolverGetErrorString(cusolverStatus_t error)</div><div class="ttdoc">Get string representation of cuSOLVER errors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:299</div></div>
<div class="ttc" id="cuda__utils_8h_html_aa7ba00b841d6b7ba443b0e58dac9ab88"><div class="ttname"><a href="cuda__utils_8h.html#aa7ba00b841d6b7ba443b0e58dac9ab88">MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT</a></div><div class="ttdeci">#define MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:554</div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a97c06b2f4d26445a7386b0f54fae1feb"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a97c06b2f4d26445a7386b0f54fae1feb">mxnet::common::cuda::CurandGetErrorString</a></div><div class="ttdeci">const char * CurandGetErrorString(curandStatus_t status)</div><div class="ttdoc">Get string representation of cuRAND errors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:328</div></div>
<div class="ttc" id="cuda__utils_8h_html_af5b41c04e3d281500957c305532cd478"><div class="ttname"><a href="cuda__utils_8h.html#af5b41c04e3d281500957c305532cd478">MaxSharedMemoryPerMultiprocessor</a></div><div class="ttdeci">int MaxSharedMemoryPerMultiprocessor(int device_id)</div><div class="ttdoc">Return the shared memory size in bytes of each of the GPU&amp;#39;s streaming multiprocessors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:506</div></div>
<div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a1a39d2f8230da3cb53528904c8a5fff0"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1a39d2f8230da3cb53528904c8a5fff0">mshadow::kUint8</a></div><div class="ttdef"><b>Definition:</b> base.h:362</div></div>
<div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html_a701d38ae493688ee2136995fe8611aa0"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a701d38ae493688ee2136995fe8611aa0">mxnet::common::cuda::DeviceStore::~DeviceStore</a></div><div class="ttdeci">~DeviceStore()</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:384</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4_html_a707d99741473be6edc5f4c345690e9ee"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#a707d99741473be6edc5f4c345690e9ee">mxnet::common::cuda::CublasType&lt; mshadow::half::half_t &gt;::ScaleType</a></div><div class="ttdeci">float ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:227</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4_html_a3af01d0a12763530b9568e836c4655e0"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3af01d0a12763530b9568e836c4655e0">mxnet::common::cuda::CublasType&lt; uint8_t &gt;::ScaleType</a></div><div class="ttdeci">uint8_t ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:237</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_html"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">mxnet::common::cuda::CublasType</a></div><div class="ttdoc">Converts between C++ datatypes and enums/constants needed by cuBLAS. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:193</div></div>
<div class="ttc" id="cuda__utils_8h_html_a7c16e8770e4f399cabed1fc231ffd9b6"><div class="ttname"><a href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a></div><div class="ttdeci">int ComputeCapabilityMinor(int device_id)</div><div class="ttdoc">Determine minor version number of the gpu&amp;#39;s cuda compute architecture. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:473</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4_html_acf8d06465837aa6ee31e125e6eeda87c"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#acf8d06465837aa6ee31e125e6eeda87c">mxnet::common::cuda::CublasType&lt; mshadow::half::half_t &gt;::one</a></div><div class="ttdeci">static const mshadow::half::half_t one</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:228</div></div>
<div class="ttc" id="namespacemshadow_html"><div class="ttname"><a href="namespacemshadow.html">mshadow</a></div><div class="ttdoc">overloaded + operator between half_t and bf16_t </div><div class="ttdef"><b>Definition:</b> base.h:334</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4_html_a735caccec4d080a0fd7e1bf88a727955"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a735caccec4d080a0fd7e1bf88a727955">mxnet::common::cuda::CublasType&lt; float &gt;::ScaleType</a></div><div class="ttdeci">float ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:207</div></div>
<div class="ttc" id="namespacemshadow_1_1expr_html_afc62edfb800bb19e201b20b444831af3"><div class="ttname"><a href="namespacemshadow_1_1expr.html#afc62edfb800bb19e201b20b444831af3">mshadow::expr::transpose</a></div><div class="ttdeci">TransposeExExp&lt; SrcExp, DType, ExpInfo&lt; SrcExp &gt;::kDim &gt; transpose(const Exp&lt; SrcExp, DType, etype &gt; &amp;src, Shape&lt; ExpInfo&lt; SrcExp &gt;::kDim &gt; axes)</div><div class="ttdoc">a expression that reshapes a tensor to another shape </div><div class="ttdef"><b>Definition:</b> transpose.h:76</div></div>
<div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a37ab9e42757689b17620f5728296d5d4"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a37ab9e42757689b17620f5728296d5d4">mshadow::kFloat16</a></div><div class="ttdef"><b>Definition:</b> base.h:361</div></div>
<div class="ttc" id="cuda__utils_8h_html_a06cc7d24ca66505e69f5ad40009f5e8d"><div class="ttname"><a href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a></div><div class="ttdeci">#define CUDA_CALL(func)</div><div class="ttdoc">Protected CUDA call. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:97</div></div>
<div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4_html_a46da9bddaa921bd38ec1c90a975972fe"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a46da9bddaa921bd38ec1c90a975972fe">mxnet::common::cuda::CublasType&lt; double &gt;::ScaleType</a></div><div class="ttdeci">double ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:217</div></div>
<div class="ttc" id="3rdparty_2mshadow_2mshadow_2base_8h_html"><div class="ttname"><a href="3rdparty_2mshadow_2mshadow_2base_8h.html">base.h</a></div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a9feee613a4f16a954dd68e55345a72ac"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a9feee613a4f16a954dd68e55345a72ac">mxnet::common::cuda::CublasGetErrorString</a></div><div class="ttdeci">const char * CublasGetErrorString(cublasStatus_t error)</div><div class="ttdoc">Get string representation of cuBLAS errors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:257</div></div>
<div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a7608f1c1700694e453f37cfadfe9e30e"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a7608f1c1700694e453f37cfadfe9e30e">mxnet::common::cuda::get_rows_per_block</a></div><div class="ttdeci">int get_rows_per_block(size_t row_size, int num_threads_per_block)</div><div class="ttdoc">Determine how many rows in a 2D matrix should a block of threads handle based on the row size and the...</div></div>
<div class="ttc" id="parameter_8h_html"><div class="ttname"><a href="parameter_8h.html">parameter.h</a></div><div class="ttdoc">Provide lightweight util to do parameter setup and checking. </div></div>
<div class="ttc" id="cuda__utils_8h_html_a82a24f3db4d0c91374cb3fe7d413f603"><div class="ttname"><a href="cuda__utils_8h.html#a82a24f3db4d0c91374cb3fe7d413f603">SupportsCooperativeLaunch</a></div><div class="ttdeci">bool SupportsCooperativeLaunch(int device_id)</div><div class="ttdoc">Return whether the GPU device_id supports cooperative-group kernel launching. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:518</div></div>
<div class="ttc" id="libinfo_8h_html"><div class="ttname"><a href="libinfo_8h.html">libinfo.h</a></div><div class="ttdoc">get features of the MXNet library at runtime </div></div>
</div><!-- fragment --></div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated on Thu Jan 5 2023 00:58:42 for mxnet by &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.13
</small></address>
</body>
</html>