| <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> |
| <html xmlns="http://www.w3.org/1999/xhtml"> |
<head>
<!-- "text/html" is the registered MIME type; older Doxygen emitted the invalid "text/xhtml". -->
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.13"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>mxnet: /work/mxnet/src/common/cuda_utils.h Source File</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css"/>
</head>
| <body> |
| <div id="top"><!-- do not remove this div, it is closed by doxygen! --> |
<!-- Project banner. NOTE(review): generated markup — the table layout and
     cellspacing/cellpadding/inline styles are what the stock doxygen.css
     expects; do not hand-modernize without regenerating the stylesheet. -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">mxnet
</div>
</td>
</tr>
</tbody>
</table>
</div>
| <!-- end header part --> |
| <!-- Generated by Doxygen 1.8.13 --> |
<script type="text/javascript">
// Create the global search-box helper. The SearchBox constructor is
// defined in search/search.js, loaded in <head> above.
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
| <script type="text/javascript" src="menudata.js"></script> |
| <script type="text/javascript" src="menu.js"></script> |
<script type="text/javascript">
/* On DOM-ready: build the top menu bar, then initialise the search widget.
   The original nested a second $(document).ready() inside this handler,
   which is redundant — the document is already ready when the outer
   callback fires, so init_search() is called directly. */
$(function() {
  initMenu('',true,false,'search.php','Search');
  init_search();
});
</script>
<div id="main-nav"></div>
<!-- window showing the filter options -->
<!-- NOTE(review): the inline on* handlers below are part of Doxygen's
     generated search machinery; search.js expects them, so converting to
     addEventListener would require matching changes in that script. -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>

<!-- iframe showing the search results (closed by default) -->
<!-- NOTE(review): per HTML a11y guidance this iframe should carry a
     title attribute; it is generated, so flagged rather than changed. -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
| |
<!-- Breadcrumb trail: links to the src/ and src/common/ directory pages
     that contain this file. -->
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="dir_fdedb0aba14d44ce9d99bc100e026e6a.html">common</a></li> </ul>
</div>
| </div><!-- top --> |
<!-- Page header: the name of the source file being rendered. -->
<div class="header">
<div class="headertitle">
<div class="title">cuda_utils.h</div> </div>
</div><!--header-->
| <div class="contents"> |
| <a href="cuda__utils_8h.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span> <span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span> <span class="comment"> * Licensed to the Apache Software Foundation (ASF) under one</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span> <span class="comment"> * or more contributor license agreements. See the NOTICE file</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span> <span class="comment"> * distributed with this work for additional information</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span> <span class="comment"> * regarding copyright ownership. The ASF licenses this file</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span> <span class="comment"> * to you under the Apache License, Version 2.0 (the</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span> <span class="comment"> * "License"); you may not use this file except in compliance</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span> <span class="comment"> * with the License. 
You may obtain a copy of the License at</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span> <span class="comment"> *</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span> <span class="comment"> * http://www.apache.org/licenses/LICENSE-2.0</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span> <span class="comment"> *</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span> <span class="comment"> * Unless required by applicable law or agreed to in writing,</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span> <span class="comment"> * software distributed under the License is distributed on an</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span> <span class="comment"> * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span> <span class="comment"> * KIND, either express or implied. 
See the License for the</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span> <span class="comment"> * specific language governing permissions and limitations</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span> <span class="comment"> * under the License.</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span> <span class="comment"> */</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span> </div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span> <span class="preprocessor">#ifndef MXNET_COMMON_CUDA_UTILS_H_</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span> <span class="preprocessor">#define MXNET_COMMON_CUDA_UTILS_H_</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span> </div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span> <span class="preprocessor">#include <dmlc/logging.h></span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span> <span class="preprocessor">#include <<a class="code" href="parameter_8h.html">dmlc/parameter.h</a>></span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span> <span class="preprocessor">#include <<a class="code" href="optional_8h.html">dmlc/optional.h</a>></span></div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span> <span class="preprocessor">#include <<a class="code" href="3rdparty_2mshadow_2mshadow_2base_8h.html">mshadow/base.h</a>></span></div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span> <span class="preprocessor">#include <<a class="code" href="libinfo_8h.html">mxnet/libinfo.h</a>></span></div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span> </div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span> <span class="preprocessor">#ifdef __JETBRAINS_IDE__</span></div><div class="line"><a 
name="l00035"></a><span class="lineno"> 35</span> <span class="preprocessor">#define __CUDACC__ 1</span></div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span> <span class="preprocessor">#define __host__</span></div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span> <span class="preprocessor">#define __device__</span></div><div class="line"><a name="l00038"></a><span class="lineno"> 38</span> <span class="preprocessor">#define __global__</span></div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span> <span class="preprocessor">#define __forceinline__</span></div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span> <span class="preprocessor">#define __shared__</span></div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span> <span class="keyword">inline</span> <span class="keywordtype">void</span> __syncthreads() {}</div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span> <span class="keyword">inline</span> <span class="keywordtype">void</span> __threadfence_block() {}</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span> <span class="keyword">template</span><<span class="keyword">class</span> T> <span class="keyword">inline</span> T __clz(<span class="keyword">const</span> T val) { <span class="keywordflow">return</span> val; }</div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span> <span class="keyword">struct </span>__cuda_fake_struct { <span class="keywordtype">int</span> x; <span class="keywordtype">int</span> y; <span class="keywordtype">int</span> z; };</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span> <span class="keyword">extern</span> __cuda_fake_struct blockDim;</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span> <span class="keyword">extern</span> __cuda_fake_struct threadIdx;</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span> 
<span class="keyword">extern</span> __cuda_fake_struct blockIdx;</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span> </div><div class="line"><a name="l00050"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a2117b58e19182dff91ad3558e650541d"> 50</a></span> <span class="preprocessor">#define QUOTE(x) #x</span></div><div class="line"><a name="l00051"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a257a331aabc15f6c701df3cff96f1b10"> 51</a></span> <span class="preprocessor">#define QUOTEVALUE(x) QUOTE(x)</span></div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span> </div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span> <span class="preprocessor">#if MXNET_USE_CUDA</span></div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span> </div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span> <span class="preprocessor">#include <cuda_runtime.h></span></div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span> <span class="preprocessor">#include <cublas_v2.h></span></div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span> <span class="preprocessor">#include <curand.h></span></div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span> </div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span> <span class="preprocessor">#include <vector></span></div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span> </div><div class="line"><a name="l00061"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ac2d16cdf196c75879d4acda60406e0ef"> 61</a></span> <span class="preprocessor">#define STATIC_ASSERT_CUDA_VERSION_GE(min_version) \</span></div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span> <span 
class="preprocessor"> static_assert(CUDA_VERSION >= min_version, "Compiled-against CUDA version " \</span></div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span> <span class="preprocessor"> QUOTEVALUE(CUDA_VERSION) " is too old, please upgrade system to version " \</span></div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span> <span class="preprocessor"> QUOTEVALUE(min_version) " or later.")</span></div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span> </div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span> <span class="preprocessor">#ifdef __CUDACC__</span></div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">bool</span> __is_supported_cuda_architecture() {</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span> <span class="preprocessor">#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 300</span></div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span> <span class="preprocessor">#error "Fermi and earlier GPU architectures are not supported (architecture versions less than 3.0)"</span></div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span> <span class="preprocessor">#else</span></div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>  <span class="keywordflow">return</span> <span class="keyword">true</span>;</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span> <span class="preprocessor">#endif // __CUDA_ARCH__ < 300</span></div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span> }</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span> <span class="preprocessor">#endif // __CUDACC__</span></div><div 
class="line"><a name="l00080"></a><span class="lineno"> 80</span> </div><div class="line"><a name="l00085"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#afc69a418242c5b851993bc2307b1c897"> 85</a></span> <span class="preprocessor">#define CHECK_CUDA_ERROR(msg) \</span></div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span> <span class="preprocessor"> cudaError_t e = cudaGetLastError(); \</span></div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span> <span class="preprocessor"> CHECK_EQ(e, cudaSuccess) << (msg) << " CUDA: " << cudaGetErrorString(e); \</span></div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span> </div><div class="line"><a name="l00097"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d"> 97</a></span> <span class="preprocessor">#define CUDA_CALL(func) \</span></div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span> <span class="preprocessor"> cudaError_t e = (func); \</span></div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span> <span class="preprocessor"> CHECK(e == cudaSuccess || e == cudaErrorCudartUnloading) \</span></div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span> <span class="preprocessor"> << "CUDA: " << cudaGetErrorString(e); \</span></div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span> </div><div class="line"><a name="l00110"></a><span class="lineno"><a class="line" 
href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381"> 110</a></span> <span class="preprocessor">#define CUBLAS_CALL(func) \</span></div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span> <span class="preprocessor"> cublasStatus_t e = (func); \</span></div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span> <span class="preprocessor"> CHECK_EQ(e, CUBLAS_STATUS_SUCCESS) \</span></div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span> <span class="preprocessor"> << "cuBLAS: " << mxnet::common::cuda::CublasGetErrorString(e); \</span></div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span> </div><div class="line"><a name="l00123"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ab38940ff6950f84102baa4573675b670"> 123</a></span> <span class="preprocessor">#define CUSOLVER_CALL(func) \</span></div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span> <span class="preprocessor"> cusolverStatus_t e = (func); \</span></div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span> <span class="preprocessor"> CHECK_EQ(e, CUSOLVER_STATUS_SUCCESS) \</span></div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span> <span class="preprocessor"> << "cuSolver: " << mxnet::common::cuda::CusolverGetErrorString(e); \</span></div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span> </div><div class="line"><a name="l00136"></a><span class="lineno"><a class="line" 
href="cuda__utils_8h.html#a82d7233550780a8c186e79c24aed8406"> 136</a></span> <span class="preprocessor">#define CURAND_CALL(func) \</span></div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span> <span class="preprocessor"> curandStatus_t e = (func); \</span></div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span> <span class="preprocessor"> CHECK_EQ(e, CURAND_STATUS_SUCCESS) \</span></div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span> <span class="preprocessor"> << "cuRAND: " << mxnet::common::cuda::CurandGetErrorString(e); \</span></div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span> </div><div class="line"><a name="l00149"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a63b6d263b94df9e33474894ad02b792d"> 149</a></span> <span class="preprocessor">#define NVRTC_CALL(x) \</span></div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span> <span class="preprocessor"> nvrtcResult result = x; \</span></div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span> <span class="preprocessor"> CHECK_EQ(result, NVRTC_SUCCESS) \</span></div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span> <span class="preprocessor"> << #x " failed with error " \</span></div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span> <span class="preprocessor"> << nvrtcGetErrorString(result); \</span></div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00156"></a><span class="lineno"> 
156</span> </div><div class="line"><a name="l00163"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a0d9b08b9ef45122c54bf5a121aeab5c3"> 163</a></span> <span class="preprocessor">#define CUDA_DRIVER_CALL(func) \</span></div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span> <span class="preprocessor"> CUresult e = (func); \</span></div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span> <span class="preprocessor"> if (e != CUDA_SUCCESS) { \</span></div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span> <span class="preprocessor"> char const * err_msg = nullptr; \</span></div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span> <span class="preprocessor"> if (cuGetErrorString(e, &err_msg) == CUDA_ERROR_INVALID_VALUE) { \</span></div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span> <span class="preprocessor"> LOG(FATAL) << "CUDA Driver: Unknown error " << e; \</span></div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span> <span class="preprocessor"> } else { \</span></div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span> <span class="preprocessor"> LOG(FATAL) << "CUDA Driver: " << err_msg; \</span></div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span> <span class="preprocessor"> } \</span></div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span> <span class="preprocessor"> } \</span></div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span> </div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span> </div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span> <span 
class="preprocessor">#if !defined(_MSC_VER)</span></div><div class="line"><a name="l00178"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a685e3713856baaafb1d4edea43725c83"> 178</a></span> <span class="preprocessor">#define CUDA_UNROLL _Pragma("unroll")</span></div><div class="line"><a name="l00179"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#addb314f15d765a2ba72ae37dab23c03b"> 179</a></span> <span class="preprocessor">#define CUDA_NOUNROLL _Pragma("nounroll")</span></div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span> <span class="preprocessor">#else</span></div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span> <span class="preprocessor">#define CUDA_UNROLL</span></div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span> <span class="preprocessor">#define CUDA_NOUNROLL</span></div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span> </div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span> <span class="keyword">namespace </span><a class="code" href="namespacemxnet.html">mxnet</a> {</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span> <span class="keyword">namespace </span>common {</div><div class="line"><a name="l00188"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html"> 188</a></span> <span class="keyword">namespace </span>cuda {</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span> <span class="keyword">template</span><<span class="keyword">typename</span> DType></div><div class="line"><a name="l00193"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html"> 193</a></span> <span class="keyword">struct </span><a class="code" 
href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a>;</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span> </div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span> <span class="comment">// With CUDA v8, cuBLAS adopted use of cudaDataType_t instead of its own</span></div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span> <span class="comment">// datatype cublasDataType_t. The older cudaDataType_t values could be</span></div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span> <span class="comment">// included below, but since this class was introduced to support the cuBLAS v8</span></div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span> <span class="comment">// call cublasGemmEx(), burdening the class with the legacy type values</span></div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span> <span class="comment">// was not needed.</span></div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span> </div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span> <span class="keyword">template</span><></div><div class="line"><a name="l00202"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html"> 202</a></span> <span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a><float> {</div><div class="line"><a name="l00203"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a36982d0c4b16568b641c6fcd0afec49b"> 203</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a404a5fd26328cf46170f6eb3424c9633">mshadow::kFloat32</a>;</div><div class="line"><a name="l00204"></a><span 
class="lineno"> 204</span> <span class="preprocessor">#if CUDA_VERSION >= 8000</span></div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>  <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_32F;</div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00207"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a735caccec4d080a0fd7e1bf88a727955"> 207</a></span>  <span class="keyword">typedef</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a735caccec4d080a0fd7e1bf88a727955">ScaleType</a>;</div><div class="line"><a name="l00208"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a437fb574fefbe87d2add1289074b194a"> 208</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a437fb574fefbe87d2add1289074b194a">one</a>;</div><div class="line"><a name="l00209"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a98a73866e9513d63627f935531456ca7"> 209</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a98a73866e9513d63627f935531456ca7">zero</a>;</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span> };</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span> <span class="keyword">template</span><></div><div class="line"><a name="l00212"></a><span class="lineno"><a class="line" 
href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html"> 212</a></span> <span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a><double> {</div><div class="line"><a name="l00213"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a479d4f2ff7b9186dfe4d81cd6ce36d4c"> 213</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1f5a1c62216cbd2200443d501924cf28">mshadow::kFloat64</a>;</div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span> <span class="preprocessor">#if CUDA_VERSION >= 8000</span></div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>  <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_64F;</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00217"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a46da9bddaa921bd38ec1c90a975972fe"> 217</a></span>  <span class="keyword">typedef</span> <span class="keywordtype">double</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a46da9bddaa921bd38ec1c90a975972fe">ScaleType</a>;</div><div class="line"><a name="l00218"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a17c4026782d6a86b7d11aae44b684969"> 218</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">double</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a17c4026782d6a86b7d11aae44b684969">one</a>;</div><div 
class="line"><a name="l00219"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#acb859823c71c7c2aeeb55de510dcb1b4"> 219</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">double</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#acb859823c71c7c2aeeb55de510dcb1b4">zero</a>;</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span> };</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span> <span class="keyword">template</span><></div><div class="line"><a name="l00222"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html"> 222</a></span> <span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a><<a class="code" href="namespacemshadow.html">mshadow</a>::half::half_t> {</div><div class="line"><a name="l00223"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#aff8ea7d6270e903b93102223dd3541ba"> 223</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a37ab9e42757689b17620f5728296d5d4">mshadow::kFloat16</a>;</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span> <span class="preprocessor">#if CUDA_VERSION >= 8000</span></div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>  <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_16F;</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00227"></a><span 
class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#a707d99741473be6edc5f4c345690e9ee"> 227</a></span>  <span class="keyword">typedef</span> <span class="keywordtype">float</span> <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#a707d99741473be6edc5f4c345690e9ee">ScaleType</a>;</div><div class="line"><a name="l00228"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#acf8d06465837aa6ee31e125e6eeda87c"> 228</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> mshadow::half::half_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#acf8d06465837aa6ee31e125e6eeda87c">one</a>;</div><div class="line"><a name="l00229"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#ae8222ef1a6cba23c5f196393d74c45ce"> 229</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> mshadow::half::half_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#ae8222ef1a6cba23c5f196393d74c45ce">zero</a>;</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span> };</div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span> <span class="keyword">template</span><></div><div class="line"><a name="l00232"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html"> 232</a></span> <span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a><uint8_t> {</div><div class="line"><a name="l00233"></a><span class="lineno"><a class="line" 
href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#aaf4a22c7533da6a79bf1c06e0c937cc5"> 233</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1a39d2f8230da3cb53528904c8a5fff0">mshadow::kUint8</a>;</div><div class="line"><a name="l00234"></a><span class="lineno"> 234</span> <span class="preprocessor">#if CUDA_VERSION >= 8000</span></div><div class="line"><a name="l00235"></a><span class="lineno"> 235</span>  <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_8I;</div><div class="line"><a name="l00236"></a><span class="lineno"> 236</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00237"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3af01d0a12763530b9568e836c4655e0"> 237</a></span>  <span class="keyword">typedef</span> uint8_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3af01d0a12763530b9568e836c4655e0">ScaleType</a>;</div><div class="line"><a name="l00238"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3fde539928c0e7b776dce38ffbf50e94"> 238</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> uint8_t one = 1;</div><div class="line"><a name="l00239"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a05d6f7ce44f65f2dee8d919005359ad8"> 239</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> uint8_t zero = 0;</div><div class="line"><a name="l00240"></a><span class="lineno"> 240</span> };</div><div class="line"><a name="l00241"></a><span class="lineno"> 241</span> <span 
class="keyword">template</span><></div><div class="line"><a name="l00242"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html"> 242</a></span> <span class="keyword">struct </span><a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">CublasType</a><int32_t> {</div><div class="line"><a name="l00243"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#ac12d3826bcfd3207cc9ccec15365630e"> 243</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> kFlag = <a class="code" href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a4fbb02e389c3126918b505cd01188368">mshadow::kInt32</a>;</div><div class="line"><a name="l00244"></a><span class="lineno"> 244</span> <span class="preprocessor">#if CUDA_VERSION >= 8000</span></div><div class="line"><a name="l00245"></a><span class="lineno"> 245</span>  <span class="keyword">static</span> <span class="keyword">const</span> cudaDataType_t kCudaFlag = CUDA_R_32I;</div><div class="line"><a name="l00246"></a><span class="lineno"> 246</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00247"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#a237f23f560dad8c0299c11a14f1dee23"> 247</a></span>  <span class="keyword">typedef</span> int32_t <a class="code" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#a237f23f560dad8c0299c11a14f1dee23">ScaleType</a>;</div><div class="line"><a name="l00248"></a><span class="lineno"><a class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#aaed4ff3ebff77d570c87a02ef40c90c0"> 248</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> int32_t one = 1;</div><div class="line"><a name="l00249"></a><span class="lineno"><a 
class="line" href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#afd9ad4f6ea376d89dfb5f9c77edf8eba"> 249</a></span>  <span class="keyword">static</span> <span class="keyword">const</span> int32_t zero = 0;</div><div class="line"><a name="l00250"></a><span class="lineno"> 250</span> };</div><div class="line"><a name="l00251"></a><span class="lineno"> 251</span> </div><div class="line"><a name="l00257"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a9feee613a4f16a954dd68e55345a72ac"> 257</a></span> <span class="keyword">inline</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a9feee613a4f16a954dd68e55345a72ac">CublasGetErrorString</a>(cublasStatus_t error) {</div><div class="line"><a name="l00258"></a><span class="lineno"> 258</span>  <span class="keywordflow">switch</span> (error) {</div><div class="line"><a name="l00259"></a><span class="lineno"> 259</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_SUCCESS:</div><div class="line"><a name="l00260"></a><span class="lineno"> 260</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_SUCCESS"</span>;</div><div class="line"><a name="l00261"></a><span class="lineno"> 261</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_NOT_INITIALIZED:</div><div class="line"><a name="l00262"></a><span class="lineno"> 262</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_NOT_INITIALIZED"</span>;</div><div class="line"><a name="l00263"></a><span class="lineno"> 263</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_ALLOC_FAILED:</div><div class="line"><a name="l00264"></a><span class="lineno"> 264</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_ALLOC_FAILED"</span>;</div><div class="line"><a name="l00265"></a><span class="lineno"> 265</span>  
<span class="keywordflow">case</span> CUBLAS_STATUS_INVALID_VALUE:</div><div class="line"><a name="l00266"></a><span class="lineno"> 266</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_INVALID_VALUE"</span>;</div><div class="line"><a name="l00267"></a><span class="lineno"> 267</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_ARCH_MISMATCH:</div><div class="line"><a name="l00268"></a><span class="lineno"> 268</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_ARCH_MISMATCH"</span>;</div><div class="line"><a name="l00269"></a><span class="lineno"> 269</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_MAPPING_ERROR:</div><div class="line"><a name="l00270"></a><span class="lineno"> 270</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_MAPPING_ERROR"</span>;</div><div class="line"><a name="l00271"></a><span class="lineno"> 271</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_EXECUTION_FAILED:</div><div class="line"><a name="l00272"></a><span class="lineno"> 272</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_EXECUTION_FAILED"</span>;</div><div class="line"><a name="l00273"></a><span class="lineno"> 273</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_INTERNAL_ERROR:</div><div class="line"><a name="l00274"></a><span class="lineno"> 274</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_INTERNAL_ERROR"</span>;</div><div class="line"><a name="l00275"></a><span class="lineno"> 275</span>  <span class="keywordflow">case</span> CUBLAS_STATUS_NOT_SUPPORTED:</div><div class="line"><a name="l00276"></a><span class="lineno"> 276</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUBLAS_STATUS_NOT_SUPPORTED"</span>;</div><div class="line"><a name="l00277"></a><span class="lineno"> 277</span>  <span 
class="keywordflow">default</span>:</div><div class="line"><a name="l00278"></a><span class="lineno"> 278</span>  <span class="keywordflow">break</span>;</div><div class="line"><a name="l00279"></a><span class="lineno"> 279</span>  }</div><div class="line"><a name="l00280"></a><span class="lineno"> 280</span>  <span class="keywordflow">return</span> <span class="stringliteral">"Unknown cuBLAS status"</span>;</div><div class="line"><a name="l00281"></a><span class="lineno"> 281</span> }</div><div class="line"><a name="l00282"></a><span class="lineno"> 282</span> </div><div class="line"><a name="l00283"></a><span class="lineno"> 283</span> <span class="preprocessor">#if CUDA_VERSION >= 8000</span></div><div class="line"><a name="l00284"></a><span class="lineno"> 284</span> </div><div class="line"><a name="l00289"></a><span class="lineno"> 289</span> <span class="keyword">inline</span> cublasOperation_t CublasTransposeOp(<span class="keywordtype">bool</span> <a class="code" href="namespacemshadow_1_1expr.html#afc62edfb800bb19e201b20b444831af3">transpose</a>) {</div><div class="line"><a name="l00290"></a><span class="lineno"> 290</span>  <span class="keywordflow">return</span> transpose ? 
CUBLAS_OP_T : CUBLAS_OP_N;</div><div class="line"><a name="l00291"></a><span class="lineno"> 291</span> }</div><div class="line"><a name="l00292"></a><span class="lineno"> 292</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00293"></a><span class="lineno"> 293</span> </div><div class="line"><a name="l00299"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#abf9bcb4cb696e9ae61b818510dac39c8"> 299</a></span> <span class="keyword">inline</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#abf9bcb4cb696e9ae61b818510dac39c8">CusolverGetErrorString</a>(cusolverStatus_t error) {</div><div class="line"><a name="l00300"></a><span class="lineno"> 300</span>  <span class="keywordflow">switch</span> (error) {</div><div class="line"><a name="l00301"></a><span class="lineno"> 301</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_SUCCESS:</div><div class="line"><a name="l00302"></a><span class="lineno"> 302</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_SUCCESS"</span>;</div><div class="line"><a name="l00303"></a><span class="lineno"> 303</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_NOT_INITIALIZED:</div><div class="line"><a name="l00304"></a><span class="lineno"> 304</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_NOT_INITIALIZED"</span>;</div><div class="line"><a name="l00305"></a><span class="lineno"> 305</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_ALLOC_FAILED:</div><div class="line"><a name="l00306"></a><span class="lineno"> 306</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_ALLOC_FAILED"</span>;</div><div class="line"><a name="l00307"></a><span class="lineno"> 307</span>  <span class="keywordflow">case</span> 
CUSOLVER_STATUS_INVALID_VALUE:</div><div class="line"><a name="l00308"></a><span class="lineno"> 308</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_INVALID_VALUE"</span>;</div><div class="line"><a name="l00309"></a><span class="lineno"> 309</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_ARCH_MISMATCH:</div><div class="line"><a name="l00310"></a><span class="lineno"> 310</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_ARCH_MISMATCH"</span>;</div><div class="line"><a name="l00311"></a><span class="lineno"> 311</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_EXECUTION_FAILED:</div><div class="line"><a name="l00312"></a><span class="lineno"> 312</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_EXECUTION_FAILED"</span>;</div><div class="line"><a name="l00313"></a><span class="lineno"> 313</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_INTERNAL_ERROR:</div><div class="line"><a name="l00314"></a><span class="lineno"> 314</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_INTERNAL_ERROR"</span>;</div><div class="line"><a name="l00315"></a><span class="lineno"> 315</span>  <span class="keywordflow">case</span> CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:</div><div class="line"><a name="l00316"></a><span class="lineno"> 316</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"</span>;</div><div class="line"><a name="l00317"></a><span class="lineno"> 317</span>  <span class="keywordflow">default</span>:</div><div class="line"><a name="l00318"></a><span class="lineno"> 318</span>  <span class="keywordflow">break</span>;</div><div class="line"><a name="l00319"></a><span class="lineno"> 319</span>  }</div><div class="line"><a name="l00320"></a><span class="lineno"> 320</span>  <span 
class="keywordflow">return</span> <span class="stringliteral">"Unknown cuSOLVER status"</span>;</div><div class="line"><a name="l00321"></a><span class="lineno"> 321</span> }</div><div class="line"><a name="l00322"></a><span class="lineno"> 322</span> </div><div class="line"><a name="l00328"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a97c06b2f4d26445a7386b0f54fae1feb"> 328</a></span> <span class="keyword">inline</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a97c06b2f4d26445a7386b0f54fae1feb">CurandGetErrorString</a>(curandStatus_t status) {</div><div class="line"><a name="l00329"></a><span class="lineno"> 329</span>  <span class="keywordflow">switch</span> (status) {</div><div class="line"><a name="l00330"></a><span class="lineno"> 330</span>  <span class="keywordflow">case</span> CURAND_STATUS_SUCCESS:</div><div class="line"><a name="l00331"></a><span class="lineno"> 331</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_SUCCESS"</span>;</div><div class="line"><a name="l00332"></a><span class="lineno"> 332</span>  <span class="keywordflow">case</span> CURAND_STATUS_VERSION_MISMATCH:</div><div class="line"><a name="l00333"></a><span class="lineno"> 333</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_VERSION_MISMATCH"</span>;</div><div class="line"><a name="l00334"></a><span class="lineno"> 334</span>  <span class="keywordflow">case</span> CURAND_STATUS_NOT_INITIALIZED:</div><div class="line"><a name="l00335"></a><span class="lineno"> 335</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_NOT_INITIALIZED"</span>;</div><div class="line"><a name="l00336"></a><span class="lineno"> 336</span>  <span class="keywordflow">case</span> CURAND_STATUS_ALLOCATION_FAILED:</div><div class="line"><a name="l00337"></a><span 
class="lineno"> 337</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_ALLOCATION_FAILED"</span>;</div><div class="line"><a name="l00338"></a><span class="lineno"> 338</span>  <span class="keywordflow">case</span> CURAND_STATUS_TYPE_ERROR:</div><div class="line"><a name="l00339"></a><span class="lineno"> 339</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_TYPE_ERROR"</span>;</div><div class="line"><a name="l00340"></a><span class="lineno"> 340</span>  <span class="keywordflow">case</span> CURAND_STATUS_OUT_OF_RANGE:</div><div class="line"><a name="l00341"></a><span class="lineno"> 341</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_OUT_OF_RANGE"</span>;</div><div class="line"><a name="l00342"></a><span class="lineno"> 342</span>  <span class="keywordflow">case</span> CURAND_STATUS_LENGTH_NOT_MULTIPLE:</div><div class="line"><a name="l00343"></a><span class="lineno"> 343</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_LENGTH_NOT_MULTIPLE"</span>;</div><div class="line"><a name="l00344"></a><span class="lineno"> 344</span>  <span class="keywordflow">case</span> CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:</div><div class="line"><a name="l00345"></a><span class="lineno"> 345</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"</span>;</div><div class="line"><a name="l00346"></a><span class="lineno"> 346</span>  <span class="keywordflow">case</span> CURAND_STATUS_LAUNCH_FAILURE:</div><div class="line"><a name="l00347"></a><span class="lineno"> 347</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_LAUNCH_FAILURE"</span>;</div><div class="line"><a name="l00348"></a><span class="lineno"> 348</span>  <span class="keywordflow">case</span> CURAND_STATUS_PREEXISTING_FAILURE:</div><div class="line"><a 
name="l00349"></a><span class="lineno"> 349</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_PREEXISTING_FAILURE"</span>;</div><div class="line"><a name="l00350"></a><span class="lineno"> 350</span>  <span class="keywordflow">case</span> CURAND_STATUS_INITIALIZATION_FAILED:</div><div class="line"><a name="l00351"></a><span class="lineno"> 351</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_INITIALIZATION_FAILED"</span>;</div><div class="line"><a name="l00352"></a><span class="lineno"> 352</span>  <span class="keywordflow">case</span> CURAND_STATUS_ARCH_MISMATCH:</div><div class="line"><a name="l00353"></a><span class="lineno"> 353</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_ARCH_MISMATCH"</span>;</div><div class="line"><a name="l00354"></a><span class="lineno"> 354</span>  <span class="keywordflow">case</span> CURAND_STATUS_INTERNAL_ERROR:</div><div class="line"><a name="l00355"></a><span class="lineno"> 355</span>  <span class="keywordflow">return</span> <span class="stringliteral">"CURAND_STATUS_INTERNAL_ERROR"</span>;</div><div class="line"><a name="l00356"></a><span class="lineno"> 356</span>  }</div><div class="line"><a name="l00357"></a><span class="lineno"> 357</span>  <span class="keywordflow">return</span> <span class="stringliteral">"Unknown cuRAND status"</span>;</div><div class="line"><a name="l00358"></a><span class="lineno"> 358</span> }</div><div class="line"><a name="l00359"></a><span class="lineno"> 359</span> </div><div class="line"><a name="l00360"></a><span class="lineno"> 360</span> <span class="keyword">template</span> <<span class="keyword">typename</span> DType></div><div class="line"><a name="l00361"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a6f3ee04eb382c57e10916108db3efd80"> 361</a></span> <span class="keyword">inline</span> DType __device__ <a class="code" 
href="namespacemxnet_1_1common_1_1cuda.html#a6f3ee04eb382c57e10916108db3efd80">CudaMax</a>(DType a, DType b) {</div><div class="line"><a name="l00362"></a><span class="lineno"> 362</span>  <span class="keywordflow">return</span> a > b ? a : b;</div><div class="line"><a name="l00363"></a><span class="lineno"> 363</span> }</div><div class="line"><a name="l00364"></a><span class="lineno"> 364</span> </div><div class="line"><a name="l00365"></a><span class="lineno"> 365</span> <span class="keyword">template</span> <<span class="keyword">typename</span> DType></div><div class="line"><a name="l00366"></a><span class="lineno"><a class="line" href="namespacemxnet_1_1common_1_1cuda.html#a03888f252f813f6d052ae84bf8801498"> 366</a></span> <span class="keyword">inline</span> DType __device__ <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a03888f252f813f6d052ae84bf8801498">CudaMin</a>(DType a, DType b) {</div><div class="line"><a name="l00367"></a><span class="lineno"> 367</span>  <span class="keywordflow">return</span> a < b ? 
a : b;</div><div class="line"><a name="l00368"></a><span class="lineno"> 368</span> }</div><div class="line"><a name="l00369"></a><span class="lineno"> 369</span> </div><div class="line"><a name="l00370"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html"> 370</a></span> <span class="keyword">class </span><a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html">DeviceStore</a> {</div><div class="line"><a name="l00371"></a><span class="lineno"> 371</span>  <span class="keyword">public</span>:</div><div class="line"><a name="l00373"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#ad9878a09a93d4fcaf9d0639b3613d9f7"> 373</a></span>  <span class="keyword">explicit</span> <a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#ad9878a09a93d4fcaf9d0639b3613d9f7">DeviceStore</a>(<span class="keywordtype">int</span> requested_device = -1, <span class="keywordtype">bool</span> restore = <span class="keyword">true</span>) :</div><div class="line"><a name="l00374"></a><span class="lineno"> 374</span>  restore_device_(-1),</div><div class="line"><a name="l00375"></a><span class="lineno"> 375</span>  current_device_(requested_device),</div><div class="line"><a name="l00376"></a><span class="lineno"> 376</span>  restore_(restore) {</div><div class="line"><a name="l00377"></a><span class="lineno"> 377</span>  <span class="keywordflow">if</span> (restore_)</div><div class="line"><a name="l00378"></a><span class="lineno"> 378</span>  <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaGetDevice(&restore_device_));</div><div class="line"><a name="l00379"></a><span class="lineno"> 379</span>  <span class="keywordflow">if</span> (requested_device != restore_device_) {</div><div class="line"><a name="l00380"></a><span class="lineno"> 380</span>  <a class="code" 
href="namespacemshadow.html#abb4c36a0703ec671a5e74b0a8d37a47a">SetDevice</a>(requested_device);</div><div class="line"><a name="l00381"></a><span class="lineno"> 381</span>  }</div><div class="line"><a name="l00382"></a><span class="lineno"> 382</span>  }</div><div class="line"><a name="l00383"></a><span class="lineno"> 383</span> </div><div class="line"><a name="l00384"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a701d38ae493688ee2136995fe8611aa0"> 384</a></span>  <a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a701d38ae493688ee2136995fe8611aa0">~DeviceStore</a>() {</div><div class="line"><a name="l00385"></a><span class="lineno"> 385</span>  <span class="keywordflow">if</span> (restore_ &&</div><div class="line"><a name="l00386"></a><span class="lineno"> 386</span>  current_device_ != restore_device_ &&</div><div class="line"><a name="l00387"></a><span class="lineno"> 387</span>  current_device_ != -1 &&</div><div class="line"><a name="l00388"></a><span class="lineno"> 388</span>  restore_device_ != -1)</div><div class="line"><a name="l00389"></a><span class="lineno"> 389</span>  <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaSetDevice(restore_device_));</div><div class="line"><a name="l00390"></a><span class="lineno"> 390</span>  }</div><div class="line"><a name="l00391"></a><span class="lineno"> 391</span> </div><div class="line"><a name="l00392"></a><span class="lineno"><a class="line" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a01163fd4915e74bdd81dd7305917f0e4"> 392</a></span>  <span class="keywordtype">void</span> <a class="code" href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a01163fd4915e74bdd81dd7305917f0e4">SetDevice</a>(<span class="keywordtype">int</span> device) {</div><div class="line"><a name="l00393"></a><span class="lineno"> 393</span>  <span class="keywordflow">if</span> (device != -1) {</div><div 
class="line"><a name="l00394"></a><span class="lineno"> 394</span>  <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaSetDevice(device));</div><div class="line"><a name="l00395"></a><span class="lineno"> 395</span>  current_device_ = device;</div><div class="line"><a name="l00396"></a><span class="lineno"> 396</span>  }</div><div class="line"><a name="l00397"></a><span class="lineno"> 397</span>  }</div><div class="line"><a name="l00398"></a><span class="lineno"> 398</span> </div><div class="line"><a name="l00399"></a><span class="lineno"> 399</span>  <span class="keyword">private</span>:</div><div class="line"><a name="l00400"></a><span class="lineno"> 400</span>  <span class="keywordtype">int</span> restore_device_;</div><div class="line"><a name="l00401"></a><span class="lineno"> 401</span>  <span class="keywordtype">int</span> current_device_;</div><div class="line"><a name="l00402"></a><span class="lineno"> 402</span>  <span class="keywordtype">bool</span> restore_;</div><div class="line"><a name="l00403"></a><span class="lineno"> 403</span> };</div><div class="line"><a name="l00404"></a><span class="lineno"> 404</span> </div><div class="line"><a name="l00413"></a><span class="lineno"> 413</span> <span class="keywordtype">int</span> <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#aa7e0a8f7264c65d8000560d84d7fc54d">get_load_type</a>(<span class="keywordtype">size_t</span> N);</div><div class="line"><a name="l00414"></a><span class="lineno"> 414</span> </div><div class="line"><a name="l00425"></a><span class="lineno"> 425</span> <span class="keywordtype">int</span> <a class="code" href="namespacemxnet_1_1common_1_1cuda.html#a7608f1c1700694e453f37cfadfe9e30e">get_rows_per_block</a>(<span class="keywordtype">size_t</span> row_size, <span class="keywordtype">int</span> num_threads_per_block);</div><div class="line"><a name="l00426"></a><span class="lineno"> 426</span> </div><div class="line"><a 
name="l00427"></a><span class="lineno"> 427</span> } <span class="comment">// namespace cuda</span></div><div class="line"><a name="l00428"></a><span class="lineno"> 428</span> } <span class="comment">// namespace common</span></div><div class="line"><a name="l00429"></a><span class="lineno"> 429</span> } <span class="comment">// namespace mxnet</span></div><div class="line"><a name="l00430"></a><span class="lineno"> 430</span> </div><div class="line"><a name="l00432"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa"> 432</a></span> constexpr <span class="keywordtype">size_t</span> <a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a> = 64;</div><div class="line"><a name="l00433"></a><span class="lineno"> 433</span> </div><div class="line"><a name="l00434"></a><span class="lineno"> 434</span> <span class="comment">// The implementations below assume that accesses of 32-bit ints are inherently atomic and</span></div><div class="line"><a name="l00435"></a><span class="lineno"> 435</span> <span class="comment">// can be read/written by multiple threads without locks. 
The values held should be < 2^31.</span></div><div class="line"><a name="l00436"></a><span class="lineno"> 436</span> </div><div class="line"><a name="l00445"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e"> 445</a></span> <span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(<span class="keywordtype">int</span> device_id, std::vector<int32_t> *cached_values,</div><div class="line"><a name="l00446"></a><span class="lineno"> 446</span>  cudaDeviceAttr attr, <span class="keyword">const</span> <span class="keywordtype">char</span> *attr_name) {</div><div class="line"><a name="l00447"></a><span class="lineno"> 447</span>  <span class="keywordflow">if</span> (device_id < 0 || device_id >= static_cast<int>(cached_values->size())) {</div><div class="line"><a name="l00448"></a><span class="lineno"> 448</span>  LOG(FATAL) << attr_name << <span class="stringliteral">"(device_id) called with invalid id: "</span> << device_id;</div><div class="line"><a name="l00449"></a><span class="lineno"> 449</span>  } <span class="keywordflow">else</span> <span class="keywordflow">if</span> ((*cached_values)[device_id] < 0) {</div><div class="line"><a name="l00450"></a><span class="lineno"> 450</span>  <span class="keywordtype">int</span> temp = -1;</div><div class="line"><a name="l00451"></a><span class="lineno"> 451</span>  <a class="code" href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a>(cudaDeviceGetAttribute(&temp, attr, device_id));</div><div class="line"><a name="l00452"></a><span class="lineno"> 452</span>  (*cached_values)[device_id] = <span class="keyword">static_cast<</span>int32_t<span class="keyword">></span>(temp);</div><div class="line"><a name="l00453"></a><span class="lineno"> 453</span>  }</div><div class="line"><a name="l00454"></a><span class="lineno"> 454</span>  <span 
class="keywordflow">return</span> (*cached_values)[device_id];</div><div class="line"><a name="l00455"></a><span class="lineno"> 455</span> }</div><div class="line"><a name="l00456"></a><span class="lineno"> 456</span> </div><div class="line"><a name="l00462"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d"> 462</a></span> <span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00463"></a><span class="lineno"> 463</span>  <span class="keyword">static</span> std::vector<int32_t> capability_major(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00464"></a><span class="lineno"> 464</span>  <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &capability_major,</div><div class="line"><a name="l00465"></a><span class="lineno"> 465</span>  cudaDevAttrComputeCapabilityMajor, <span class="stringliteral">"ComputeCapabilityMajor"</span>);</div><div class="line"><a name="l00466"></a><span class="lineno"> 466</span> }</div><div class="line"><a name="l00467"></a><span class="lineno"> 467</span> </div><div class="line"><a name="l00473"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6"> 473</a></span> <span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00474"></a><span class="lineno"> 474</span>  <span class="keyword">static</span> std::vector<int32_t> capability_minor(<a class="code" 
href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00475"></a><span class="lineno"> 475</span>  <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &capability_minor,</div><div class="line"><a name="l00476"></a><span class="lineno"> 476</span>  cudaDevAttrComputeCapabilityMinor, <span class="stringliteral">"ComputeCapabilityMinor"</span>);</div><div class="line"><a name="l00477"></a><span class="lineno"> 477</span> }</div><div class="line"><a name="l00478"></a><span class="lineno"> 478</span> </div><div class="line"><a name="l00484"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a9779e3ad0efd0faec7fbe431c0db896d"> 484</a></span> <span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#a9779e3ad0efd0faec7fbe431c0db896d">SMArch</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00485"></a><span class="lineno"> 485</span>  <span class="keyword">auto</span> major = <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(device_id);</div><div class="line"><a name="l00486"></a><span class="lineno"> 486</span>  <span class="keyword">auto</span> minor = <a class="code" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a>(device_id);</div><div class="line"><a name="l00487"></a><span class="lineno"> 487</span>  <span class="keywordflow">return</span> 10 * major + minor;</div><div class="line"><a name="l00488"></a><span class="lineno"> 488</span> }</div><div class="line"><a name="l00489"></a><span class="lineno"> 489</span> </div><div class="line"><a name="l00495"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ac51c1cdc60e05dd857bfabca52355f2f"> 495</a></span> <span class="keyword">inline</span> <span 
class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#ac51c1cdc60e05dd857bfabca52355f2f">MultiprocessorCount</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00496"></a><span class="lineno"> 496</span>  <span class="keyword">static</span> std::vector<int32_t> sm_counts(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00497"></a><span class="lineno"> 497</span>  <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &sm_counts,</div><div class="line"><a name="l00498"></a><span class="lineno"> 498</span>  cudaDevAttrMultiProcessorCount, <span class="stringliteral">"MultiprocessorCount"</span>);</div><div class="line"><a name="l00499"></a><span class="lineno"> 499</span> }</div><div class="line"><a name="l00500"></a><span class="lineno"> 500</span> </div><div class="line"><a name="l00506"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#af5b41c04e3d281500957c305532cd478"> 506</a></span> <span class="keyword">inline</span> <span class="keywordtype">int</span> <a class="code" href="cuda__utils_8h.html#af5b41c04e3d281500957c305532cd478">MaxSharedMemoryPerMultiprocessor</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00507"></a><span class="lineno"> 507</span>  <span class="keyword">static</span> std::vector<int32_t> max_smem_per_mutiprocessor(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00508"></a><span class="lineno"> 508</span>  <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &max_smem_per_mutiprocessor,</div><div class="line"><a name="l00509"></a><span class="lineno"> 509</span>  
cudaDevAttrMaxSharedMemoryPerMultiprocessor,</div><div class="line"><a name="l00510"></a><span class="lineno"> 510</span>  <span class="stringliteral">"MaxSharedMemoryPerMultiprocessor"</span>);</div><div class="line"><a name="l00511"></a><span class="lineno"> 511</span> }</div><div class="line"><a name="l00512"></a><span class="lineno"> 512</span> </div><div class="line"><a name="l00518"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a82a24f3db4d0c91374cb3fe7d413f603"> 518</a></span> <span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#a82a24f3db4d0c91374cb3fe7d413f603">SupportsCooperativeLaunch</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00519"></a><span class="lineno"> 519</span>  <span class="keyword">static</span> std::vector<int32_t> coop_launch(<a class="code" href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a>, -1);</div><div class="line"><a name="l00520"></a><span class="lineno"> 520</span>  <span class="keywordflow">return</span> <a class="code" href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a>(device_id, &coop_launch,</div><div class="line"><a name="l00521"></a><span class="lineno"> 521</span>  cudaDevAttrCooperativeLaunch, <span class="stringliteral">"SupportsCooperativeLaunch"</span>);</div><div class="line"><a name="l00522"></a><span class="lineno"> 522</span> }</div><div class="line"><a name="l00523"></a><span class="lineno"> 523</span> </div><div class="line"><a name="l00530"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#afb4268417c1d8886a39142c85c8f188f"> 530</a></span> <span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#afb4268417c1d8886a39142c85c8f188f">SupportsFloat16Compute</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00531"></a><span 
class="lineno"> 531</span>  <span class="keywordflow">if</span> (device_id < 0) {</div><div class="line"><a name="l00532"></a><span class="lineno"> 532</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div><div class="line"><a name="l00533"></a><span class="lineno"> 533</span>  } <span class="keywordflow">else</span> {</div><div class="line"><a name="l00534"></a><span class="lineno"> 534</span>  <span class="comment">// Kepler and most Maxwell GPUs do not support fp16 compute</span></div><div class="line"><a name="l00535"></a><span class="lineno"> 535</span>  <span class="keywordtype">int</span> computeCapabilityMajor = <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(device_id);</div><div class="line"><a name="l00536"></a><span class="lineno"> 536</span>  <span class="keywordflow">return</span> (computeCapabilityMajor > 5) ||</div><div class="line"><a name="l00537"></a><span class="lineno"> 537</span>  (computeCapabilityMajor == 5 && <a class="code" href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a>(device_id) >= 3);</div><div class="line"><a name="l00538"></a><span class="lineno"> 538</span>  }</div><div class="line"><a name="l00539"></a><span class="lineno"> 539</span> }</div><div class="line"><a name="l00540"></a><span class="lineno"> 540</span> </div><div class="line"><a name="l00547"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#af7e22ce6d80d61e8ca37df23880ff1a9"> 547</a></span> <span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#af7e22ce6d80d61e8ca37df23880ff1a9">SupportsTensorCore</a>(<span class="keywordtype">int</span> device_id) {</div><div class="line"><a name="l00548"></a><span class="lineno"> 548</span>  <span class="comment">// Volta (sm_70) supports TensorCore algos</span></div><div class="line"><a name="l00549"></a><span class="lineno"> 
549</span>  <span class="keywordflow">return</span> device_id >= 0 &&</div><div class="line"><a name="l00550"></a><span class="lineno"> 550</span>  <a class="code" href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a>(device_id) >=7;</div><div class="line"><a name="l00551"></a><span class="lineno"> 551</span> }</div><div class="line"><a name="l00552"></a><span class="lineno"> 552</span> </div><div class="line"><a name="l00553"></a><span class="lineno"> 553</span> <span class="comment">// The policy if the user hasn't set the environment variable MXNET_CUDA_ALLOW_TENSOR_CORE</span></div><div class="line"><a name="l00554"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#aa7ba00b841d6b7ba443b0e58dac9ab88"> 554</a></span> <span class="preprocessor">#define MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT true</span></div><div class="line"><a name="l00555"></a><span class="lineno"> 555</span> </div><div class="line"><a name="l00560"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#a464dee13053e3b0b1006c6307069196c"> 560</a></span> <span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#a464dee13053e3b0b1006c6307069196c">GetEnvAllowTensorCore</a>() {</div><div class="line"><a name="l00561"></a><span class="lineno"> 561</span>  <span class="comment">// Since these statics are in the '.h' file, they will exist and will be set</span></div><div class="line"><a name="l00562"></a><span class="lineno"> 562</span>  <span class="comment">// separately in each compilation unit. 
Not ideal, but cleaner than creating a</span></div><div class="line"><a name="l00563"></a><span class="lineno"> 563</span>  <span class="comment">// cuda_utils.cc solely to have a single instance and initialization.</span></div><div class="line"><a name="l00564"></a><span class="lineno"> 564</span>  <span class="keyword">static</span> <span class="keywordtype">bool</span> allow_tensor_core = <span class="keyword">false</span>;</div><div class="line"><a name="l00565"></a><span class="lineno"> 565</span>  <span class="keyword">static</span> <span class="keywordtype">bool</span> is_set = <span class="keyword">false</span>;</div><div class="line"><a name="l00566"></a><span class="lineno"> 566</span>  <span class="keywordflow">if</span> (!is_set) {</div><div class="line"><a name="l00567"></a><span class="lineno"> 567</span>  <span class="comment">// Use of optional<bool> here permits: "0", "1", "true" and "false" to all be legal.</span></div><div class="line"><a name="l00568"></a><span class="lineno"> 568</span>  <span class="keywordtype">bool</span> default_value = <a class="code" href="cuda__utils_8h.html#aa7ba00b841d6b7ba443b0e58dac9ab88">MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT</a>;</div><div class="line"><a name="l00569"></a><span class="lineno"> 569</span>  allow_tensor_core = dmlc::GetEnv(<span class="stringliteral">"MXNET_CUDA_ALLOW_TENSOR_CORE"</span>,</div><div class="line"><a name="l00570"></a><span class="lineno"> 570</span>  <a class="code" href="classdmlc_1_1optional.html">dmlc::optional<bool></a>(default_value)).value();</div><div class="line"><a name="l00571"></a><span class="lineno"> 571</span>  is_set = <span class="keyword">true</span>;</div><div class="line"><a name="l00572"></a><span class="lineno"> 572</span>  }</div><div class="line"><a name="l00573"></a><span class="lineno"> 573</span>  <span class="keywordflow">return</span> allow_tensor_core;</div><div class="line"><a name="l00574"></a><span class="lineno"> 574</span> }</div><div class="line"><a 
name="l00575"></a><span class="lineno"> 575</span> </div><div class="line"><a name="l00576"></a><span class="lineno"> 576</span> <span class="comment">// The policy if the user hasn't set the environment variable</span></div><div class="line"><a name="l00577"></a><span class="lineno"> 577</span> <span class="comment">// CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION</span></div><div class="line"><a name="l00578"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#aa16d34c218441b0d4074baa8c66a5521"> 578</a></span> <span class="preprocessor">#define MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT false</span></div><div class="line"><a name="l00579"></a><span class="lineno"> 579</span> </div><div class="line"><a name="l00583"></a><span class="lineno"><a class="line" href="cuda__utils_8h.html#ad77e70546b7f35ecba0098caa2d07523"> 583</a></span> <span class="keyword">inline</span> <span class="keywordtype">bool</span> <a class="code" href="cuda__utils_8h.html#ad77e70546b7f35ecba0098caa2d07523">GetEnvAllowTensorCoreConversion</a>() {</div><div class="line"><a name="l00584"></a><span class="lineno"> 584</span>  <span class="comment">// Use of optional<bool> here permits: "0", "1", "true" and "false" to all be</span></div><div class="line"><a name="l00585"></a><span class="lineno"> 585</span>  <span class="comment">// legal.</span></div><div class="line"><a name="l00586"></a><span class="lineno"> 586</span>  <span class="keywordtype">bool</span> default_value = <a class="code" href="cuda__utils_8h.html#aa16d34c218441b0d4074baa8c66a5521">MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT</a>;</div><div class="line"><a name="l00587"></a><span class="lineno"> 587</span>  <span class="keywordflow">return</span> dmlc::GetEnv(<span class="stringliteral">"MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"</span>,</div><div class="line"><a name="l00588"></a><span class="lineno"> 588</span>  <a class="code" 
href="classdmlc_1_1optional.html">dmlc::optional<bool></a>(default_value))</div><div class="line"><a name="l00589"></a><span class="lineno"> 589</span>  .value();</div><div class="line"><a name="l00590"></a><span class="lineno"> 590</span> }</div><div class="line"><a name="l00591"></a><span class="lineno"> 591</span> </div><div class="line"><a name="l00592"></a><span class="lineno"> 592</span> <span class="preprocessor">#if CUDA_VERSION >= 9000</span></div><div class="line"><a name="l00593"></a><span class="lineno"> 593</span> <span class="comment">// Sets the cuBLAS math mode that determines the 'allow TensorCore' policy. Returns previous.</span></div><div class="line"><a name="l00594"></a><span class="lineno"> 594</span> <span class="keyword">inline</span> cublasMath_t SetCublasMathMode(cublasHandle_t blas_handle, cublasMath_t new_math_type) {</div><div class="line"><a name="l00595"></a><span class="lineno"> 595</span>  <span class="keyword">auto</span> handle_math_mode = CUBLAS_DEFAULT_MATH;</div><div class="line"><a name="l00596"></a><span class="lineno"> 596</span>  <a class="code" href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381">CUBLAS_CALL</a>(cublasGetMathMode(blas_handle, &handle_math_mode));</div><div class="line"><a name="l00597"></a><span class="lineno"> 597</span>  <a class="code" href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381">CUBLAS_CALL</a>(cublasSetMathMode(blas_handle, new_math_type));</div><div class="line"><a name="l00598"></a><span class="lineno"> 598</span>  <span class="keywordflow">return</span> handle_math_mode;</div><div class="line"><a name="l00599"></a><span class="lineno"> 599</span> }</div><div class="line"><a name="l00600"></a><span class="lineno"> 600</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00601"></a><span class="lineno"> 601</span> </div><div class="line"><a name="l00602"></a><span class="lineno"> 602</span> <span class="preprocessor">#endif // 
MXNET_USE_CUDA</span></div><div class="line"><a name="l00603"></a><span class="lineno"> 603</span> </div><div class="line"><a name="l00604"></a><span class="lineno"> 604</span> <span class="preprocessor">#if MXNET_USE_CUDNN</span></div><div class="line"><a name="l00605"></a><span class="lineno"> 605</span> </div><div class="line"><a name="l00606"></a><span class="lineno"> 606</span> <span class="preprocessor">#include <cudnn.h></span></div><div class="line"><a name="l00607"></a><span class="lineno"> 607</span> </div><div class="line"><a name="l00608"></a><span class="lineno"> 608</span> <span class="comment">// Creating CUDNN_VERSION_AS_STRING as follows avoids a static_assert error message that shows</span></div><div class="line"><a name="l00609"></a><span class="lineno"> 609</span> <span class="comment">// the formula for CUDNN_VERSION, i.e. "1000 * 7 + 100 * 6 + 0" rather than number "7600".</span></div><div class="line"><a name="l00610"></a><span class="lineno"> 610</span> static_assert(CUDNN_PATCHLEVEL < 100 && CUDNN_MINOR < 10,</div><div class="line"><a name="l00611"></a><span class="lineno"> 611</span>  <span class="stringliteral">"CUDNN_VERSION_AS_STRING macro assumptions violated."</span>);</div><div class="line"><a name="l00612"></a><span class="lineno"> 612</span> <span class="preprocessor">#if CUDNN_PATCHLEVEL >= 10</span></div><div class="line"><a name="l00613"></a><span class="lineno"> 613</span> <span class="preprocessor">#define CUDNN_VERSION_AS_STRING QUOTEVALUE(CUDNN_MAJOR) \</span></div><div class="line"><a name="l00614"></a><span class="lineno"> 614</span> <span class="preprocessor"> QUOTEVALUE(CUDNN_MINOR) \</span></div><div class="line"><a name="l00615"></a><span class="lineno"> 615</span> <span class="preprocessor"> QUOTEVALUE(CUDNN_PATCHLEVEL)</span></div><div class="line"><a name="l00616"></a><span class="lineno"> 616</span> <span class="preprocessor">#else</span></div><div class="line"><a name="l00617"></a><span class="lineno"> 617</span> 
<span class="preprocessor">#define CUDNN_VERSION_AS_STRING QUOTEVALUE(CUDNN_MAJOR) \</span></div><div class="line"><a name="l00618"></a><span class="lineno"> 618</span> <span class="preprocessor"> QUOTEVALUE(CUDNN_MINOR) \</span></div><div class="line"><a name="l00619"></a><span class="lineno"> 619</span> <span class="preprocessor"> "0" QUOTEVALUE(CUDNN_PATCHLEVEL)</span></div><div class="line"><a name="l00620"></a><span class="lineno"> 620</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00621"></a><span class="lineno"> 621</span> </div><div class="line"><a name="l00622"></a><span class="lineno"> 622</span> <span class="preprocessor">#define STATIC_ASSERT_CUDNN_VERSION_GE(min_version) \</span></div><div class="line"><a name="l00623"></a><span class="lineno"> 623</span> <span class="preprocessor"> static_assert(CUDNN_VERSION >= min_version, "Compiled-against cuDNN version " \</span></div><div class="line"><a name="l00624"></a><span class="lineno"> 624</span> <span class="preprocessor"> CUDNN_VERSION_AS_STRING " is too old, please upgrade system to version " \</span></div><div class="line"><a name="l00625"></a><span class="lineno"> 625</span> <span class="preprocessor"> QUOTEVALUE(min_version) " or later.")</span></div><div class="line"><a name="l00626"></a><span class="lineno"> 626</span> </div><div class="line"><a name="l00627"></a><span class="lineno"> 627</span> <span class="preprocessor">#define CUDNN_CALL(func) \</span></div><div class="line"><a name="l00628"></a><span class="lineno"> 628</span> <span class="preprocessor"> { \</span></div><div class="line"><a name="l00629"></a><span class="lineno"> 629</span> <span class="preprocessor"> cudnnStatus_t e = (func); \</span></div><div class="line"><a name="l00630"></a><span class="lineno"> 630</span> <span class="preprocessor"> CHECK_EQ(e, CUDNN_STATUS_SUCCESS) << "cuDNN: " << cudnnGetErrorString(e); \</span></div><div class="line"><a name="l00631"></a><span class="lineno"> 
631</span> <span class="preprocessor"> }</span></div><div class="line"><a name="l00632"></a><span class="lineno"> 632</span> </div><div class="line"><a name="l00640"></a><span class="lineno"> 640</span> <span class="keyword">inline</span> <span class="keywordtype">int</span> MaxForwardAlgos(cudnnHandle_t cudnn_handle) {</div><div class="line"><a name="l00641"></a><span class="lineno"> 641</span>  STATIC_ASSERT_CUDNN_VERSION_GE(7000);</div><div class="line"><a name="l00642"></a><span class="lineno"> 642</span>  <span class="keywordtype">int</span> max_algos = 0;</div><div class="line"><a name="l00643"></a><span class="lineno"> 643</span>  CUDNN_CALL(cudnnGetConvolutionForwardAlgorithmMaxCount(cudnn_handle, &max_algos));</div><div class="line"><a name="l00644"></a><span class="lineno"> 644</span>  <span class="keywordflow">return</span> max_algos;</div><div class="line"><a name="l00645"></a><span class="lineno"> 645</span> }</div><div class="line"><a name="l00646"></a><span class="lineno"> 646</span> </div><div class="line"><a name="l00654"></a><span class="lineno"> 654</span> <span class="keyword">inline</span> <span class="keywordtype">int</span> MaxBackwardFilterAlgos(cudnnHandle_t cudnn_handle) {</div><div class="line"><a name="l00655"></a><span class="lineno"> 655</span>  STATIC_ASSERT_CUDNN_VERSION_GE(7000);</div><div class="line"><a name="l00656"></a><span class="lineno"> 656</span>  <span class="keywordtype">int</span> max_algos = 0;</div><div class="line"><a name="l00657"></a><span class="lineno"> 657</span>  CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnn_handle, &max_algos));</div><div class="line"><a name="l00658"></a><span class="lineno"> 658</span>  <span class="keywordflow">return</span> max_algos;</div><div class="line"><a name="l00659"></a><span class="lineno"> 659</span> }</div><div class="line"><a name="l00660"></a><span class="lineno"> 660</span> </div><div class="line"><a name="l00668"></a><span class="lineno"> 668</span> 
<span class="keyword">inline</span> <span class="keywordtype">int</span> MaxBackwardDataAlgos(cudnnHandle_t cudnn_handle) {</div><div class="line"><a name="l00669"></a><span class="lineno"> 669</span>  STATIC_ASSERT_CUDNN_VERSION_GE(7000);</div><div class="line"><a name="l00670"></a><span class="lineno"> 670</span>  <span class="keywordtype">int</span> max_algos = 0;</div><div class="line"><a name="l00671"></a><span class="lineno"> 671</span>  CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnn_handle, &max_algos));</div><div class="line"><a name="l00672"></a><span class="lineno"> 672</span>  <span class="keywordflow">return</span> max_algos;</div><div class="line"><a name="l00673"></a><span class="lineno"> 673</span> }</div><div class="line"><a name="l00674"></a><span class="lineno"> 674</span> </div><div class="line"><a name="l00675"></a><span class="lineno"> 675</span> <span class="preprocessor">#endif // MXNET_USE_CUDNN</span></div><div class="line"><a name="l00676"></a><span class="lineno"> 676</span> </div><div class="line"><a name="l00677"></a><span class="lineno"> 677</span> <span class="comment">// Overload atomicAdd to work for floats on all architectures</span></div><div class="line"><a name="l00678"></a><span class="lineno"> 678</span> <span class="preprocessor">#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600</span></div><div class="line"><a name="l00679"></a><span class="lineno"> 679</span> <span class="comment">// From CUDA Programming Guide</span></div><div class="line"><a name="l00680"></a><span class="lineno"> 680</span> <span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(<span class="keywordtype">double</span> *address, <span class="keywordtype">double</span> val) {</div><div class="line"><a name="l00681"></a><span class="lineno"> 681</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span 
class="keywordtype">long</span>* address_as_ull = <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00682"></a><span class="lineno"> 682</span>  <span class="keyword">reinterpret_cast<</span><span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span>*<span class="keyword">></span>(address); <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00683"></a><span class="lineno"> 683</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> old = *address_as_ull; <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00684"></a><span class="lineno"> 684</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> assumed; <span class="comment">// NOLINT(*)</span></div><div class="line"><a name="l00685"></a><span class="lineno"> 685</span> </div><div class="line"><a name="l00686"></a><span class="lineno"> 686</span>  <span class="keywordflow">do</span> {</div><div class="line"><a name="l00687"></a><span class="lineno"> 687</span>  assumed = old;</div><div class="line"><a name="l00688"></a><span class="lineno"> 688</span>  old = atomicCAS(address_as_ull, assumed,</div><div class="line"><a name="l00689"></a><span class="lineno"> 689</span>  __double_as_longlong(val +</div><div class="line"><a name="l00690"></a><span class="lineno"> 690</span>  __longlong_as_double(assumed)));</div><div class="line"><a name="l00691"></a><span class="lineno"> 691</span> </div><div class="line"><a name="l00692"></a><span class="lineno"> 692</span>  <span class="comment">// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)</span></div><div class="line"><a name="l00693"></a><span class="lineno"> 693</span>  } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00694"></a><span 
class="lineno"> 694</span> }</div><div class="line"><a name="l00695"></a><span class="lineno"> 695</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00696"></a><span class="lineno"> 696</span> </div><div class="line"><a name="l00697"></a><span class="lineno"> 697</span> <span class="comment">// Overload atomicAdd for half precision</span></div><div class="line"><a name="l00698"></a><span class="lineno"> 698</span> <span class="comment">// Taken from:</span></div><div class="line"><a name="l00699"></a><span class="lineno"> 699</span> <span class="comment">// https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh</span></div><div class="line"><a name="l00700"></a><span class="lineno"> 700</span> <span class="preprocessor">#ifdef __CUDACC__</span></div><div class="line"><a name="l00701"></a><span class="lineno"> 701</span> <span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(mshadow::half::half_t *address,</div><div class="line"><a name="l00702"></a><span class="lineno"> 702</span>  mshadow::half::half_t val) {</div><div class="line"><a name="l00703"></a><span class="lineno"> 703</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *address_as_ui =</div><div class="line"><a name="l00704"></a><span class="lineno"> 704</span>  <span class="keyword">reinterpret_cast<</span><span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *<span class="keyword">></span>(<span class="keyword">reinterpret_cast<</span><span class="keywordtype">char</span> *<span class="keyword">></span>(address) -</div><div class="line"><a name="l00705"></a><span class="lineno"> 705</span>  (reinterpret_cast<size_t>(address) & 2));</div><div class="line"><a name="l00706"></a><span class="lineno"> 706</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> old = *address_as_ui;</div><div 
class="line"><a name="l00707"></a><span class="lineno"> 707</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> assumed;</div><div class="line"><a name="l00708"></a><span class="lineno"> 708</span> </div><div class="line"><a name="l00709"></a><span class="lineno"> 709</span>  <span class="keywordflow">do</span> {</div><div class="line"><a name="l00710"></a><span class="lineno"> 710</span>  assumed = old;</div><div class="line"><a name="l00711"></a><span class="lineno"> 711</span>  mshadow::half::half_t hsum;</div><div class="line"><a name="l00712"></a><span class="lineno"> 712</span>  hsum.half_ =</div><div class="line"><a name="l00713"></a><span class="lineno"> 713</span>  <span class="keyword">reinterpret_cast<</span><span class="keywordtype">size_t</span><span class="keyword">></span>(address) & 2 ? (old >> 16) : (old & 0xffff);</div><div class="line"><a name="l00714"></a><span class="lineno"> 714</span>  hsum += val;</div><div class="line"><a name="l00715"></a><span class="lineno"> 715</span>  old = <span class="keyword">reinterpret_cast<</span><span class="keywordtype">size_t</span><span class="keyword">></span>(address) & 2</div><div class="line"><a name="l00716"></a><span class="lineno"> 716</span>  ? 
(old & 0xffff) | (hsum.half_ << 16)</div><div class="line"><a name="l00717"></a><span class="lineno"> 717</span>  : (old & 0xffff0000) | hsum.half_;</div><div class="line"><a name="l00718"></a><span class="lineno"> 718</span>  old = atomicCAS(address_as_ui, assumed, old);</div><div class="line"><a name="l00719"></a><span class="lineno"> 719</span>  } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00720"></a><span class="lineno"> 720</span> }</div><div class="line"><a name="l00721"></a><span class="lineno"> 721</span> </div><div class="line"><a name="l00722"></a><span class="lineno"> 722</span> <span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(uint8_t *address, uint8_t val) {</div><div class="line"><a name="l00723"></a><span class="lineno"> 723</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> * address_as_ui = (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *) (address - ((<span class="keywordtype">size_t</span>)address & 0x3));</div><div class="line"><a name="l00724"></a><span class="lineno"> 724</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> old = *address_as_ui;</div><div class="line"><a name="l00725"></a><span class="lineno"> 725</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> shift = (((size_t)address & 0x3) << 3);</div><div class="line"><a name="l00726"></a><span class="lineno"> 726</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> sum;</div><div class="line"><a name="l00727"></a><span class="lineno"> 727</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> assumed;</div><div class="line"><a name="l00728"></a><span class="lineno"> 728</span> </div><div class="line"><a name="l00729"></a><span class="lineno"> 729</span>  
<span class="keywordflow">do</span> {</div><div class="line"><a name="l00730"></a><span class="lineno"> 730</span>  assumed = old;</div><div class="line"><a name="l00731"></a><span class="lineno"> 731</span>  sum = val + <span class="keyword">static_cast<</span>uint8_t<span class="keyword">></span>((old >> shift) & 0xff);</div><div class="line"><a name="l00732"></a><span class="lineno"> 732</span>  old = (old & ~(0x000000ff << shift)) | (sum << shift);</div><div class="line"><a name="l00733"></a><span class="lineno"> 733</span>  old = atomicCAS(address_as_ui, assumed, old);</div><div class="line"><a name="l00734"></a><span class="lineno"> 734</span>  } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00735"></a><span class="lineno"> 735</span> }</div><div class="line"><a name="l00736"></a><span class="lineno"> 736</span> </div><div class="line"><a name="l00737"></a><span class="lineno"> 737</span> <span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(int8_t *address, int8_t val) {</div><div class="line"><a name="l00738"></a><span class="lineno"> 738</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> * address_as_ui = (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> *) (address - ((<span class="keywordtype">size_t</span>)address & 0x3));</div><div class="line"><a name="l00739"></a><span class="lineno"> 739</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> old = *address_as_ui;</div><div class="line"><a name="l00740"></a><span class="lineno"> 740</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> shift = (((size_t)address & 0x3) << 3);</div><div class="line"><a name="l00741"></a><span class="lineno"> 741</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> sum;</div><div 
class="line"><a name="l00742"></a><span class="lineno"> 742</span>  <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> assumed;</div><div class="line"><a name="l00743"></a><span class="lineno"> 743</span> </div><div class="line"><a name="l00744"></a><span class="lineno"> 744</span>  <span class="keywordflow">do</span> {</div><div class="line"><a name="l00745"></a><span class="lineno"> 745</span>  assumed = old;</div><div class="line"><a name="l00746"></a><span class="lineno"> 746</span>  sum = val + <span class="keyword">static_cast<</span>int8_t<span class="keyword">></span>((old >> shift) & 0xff);</div><div class="line"><a name="l00747"></a><span class="lineno"> 747</span>  old = (old & ~(0x000000ff << shift)) | (sum << shift);</div><div class="line"><a name="l00748"></a><span class="lineno"> 748</span>  old = atomicCAS(address_as_ui, assumed, old);</div><div class="line"><a name="l00749"></a><span class="lineno"> 749</span>  } <span class="keywordflow">while</span> (assumed != old);</div><div class="line"><a name="l00750"></a><span class="lineno"> 750</span> }</div><div class="line"><a name="l00751"></a><span class="lineno"> 751</span> </div><div class="line"><a name="l00752"></a><span class="lineno"> 752</span> <span class="comment">// Overload atomicAdd to work for signed int64 on all architectures</span></div><div class="line"><a name="l00753"></a><span class="lineno"> 753</span> <span class="keyword">static</span> <span class="keyword">inline</span> __device__ <span class="keywordtype">void</span> atomicAdd(int64_t *address, int64_t val) {</div><div class="line"><a name="l00754"></a><span class="lineno"> 754</span>  atomicAdd(reinterpret_cast<unsigned long long*>(address), static_cast<unsigned long long>(val)); <span class="comment">// NOLINT</span></div><div class="line"><a name="l00755"></a><span class="lineno"> 755</span> }</div><div class="line"><a name="l00756"></a><span class="lineno"> 756</span> </div><div class="line"><a 
name="l00757"></a><span class="lineno"> 757</span> <span class="keyword">template</span> <<span class="keyword">typename</span> DType></div><div class="line"><a name="l00758"></a><span class="lineno"> 758</span> __device__ <span class="keyword">inline</span> DType ldg(<span class="keyword">const</span> DType* address) {</div><div class="line"><a name="l00759"></a><span class="lineno"> 759</span> <span class="preprocessor">#if __CUDA_ARCH__ >= 350</span></div><div class="line"><a name="l00760"></a><span class="lineno"> 760</span>  <span class="keywordflow">return</span> __ldg(address);</div><div class="line"><a name="l00761"></a><span class="lineno"> 761</span> <span class="preprocessor">#else</span></div><div class="line"><a name="l00762"></a><span class="lineno"> 762</span>  <span class="keywordflow">return</span> *address;</div><div class="line"><a name="l00763"></a><span class="lineno"> 763</span> <span class="preprocessor">#endif</span></div><div class="line"><a name="l00764"></a><span class="lineno"> 764</span> }</div><div class="line"><a name="l00765"></a><span class="lineno"> 765</span> </div><div class="line"><a name="l00766"></a><span class="lineno"> 766</span> <span class="keyword">template</span> <<span class="keyword">typename</span> OP, <span class="keyword">typename</span> T></div><div class="line"><a name="l00767"></a><span class="lineno"> 767</span> __device__ <span class="keyword">inline</span> T warp_reduce(T value, OP redfun) {</div><div class="line"><a name="l00768"></a><span class="lineno"> 768</span>  value = redfun(value, __shfl_down_sync(0xffffffff, value, 16));</div><div class="line"><a name="l00769"></a><span class="lineno"> 769</span>  value = redfun(value, __shfl_down_sync(0xffffffff, value, 8));</div><div class="line"><a name="l00770"></a><span class="lineno"> 770</span>  value = redfun(value, __shfl_down_sync(0xffffffff, value, 4));</div><div class="line"><a name="l00771"></a><span class="lineno"> 771</span>  value = redfun(value, 
__shfl_down_sync(0xffffffff, value, 2));</div><div class="line"><a name="l00772"></a><span class="lineno"> 772</span>  value = redfun(value, __shfl_down_sync(0xffffffff, value, 1));</div><div class="line"><a name="l00773"></a><span class="lineno"> 773</span>  <span class="keywordflow">return</span> value;</div><div class="line"><a name="l00774"></a><span class="lineno"> 774</span> }</div><div class="line"><a name="l00775"></a><span class="lineno"> 775</span> </div><div class="line"><a name="l00776"></a><span class="lineno"> 776</span> <span class="keyword">template</span> <<span class="keyword">typename</span> OP></div><div class="line"><a name="l00777"></a><span class="lineno"> 777</span> __device__ <span class="keyword">inline</span> mshadow::half::half_t warp_reduce(mshadow::half::half_t value, OP redfun) {</div><div class="line"><a name="l00778"></a><span class="lineno"> 778</span>  <span class="keywordtype">float</span> v = <span class="keyword">static_cast<</span><span class="keywordtype">float</span><span class="keyword">></span>(value);</div><div class="line"><a name="l00779"></a><span class="lineno"> 779</span>  v = redfun(v, __shfl_down_sync(0xffffffff, v, 16));</div><div class="line"><a name="l00780"></a><span class="lineno"> 780</span>  v = redfun(v, __shfl_down_sync(0xffffffff, v, 8));</div><div class="line"><a name="l00781"></a><span class="lineno"> 781</span>  v = redfun(v, __shfl_down_sync(0xffffffff, v, 4));</div><div class="line"><a name="l00782"></a><span class="lineno"> 782</span>  v = redfun(v, __shfl_down_sync(0xffffffff, v, 2));</div><div class="line"><a name="l00783"></a><span class="lineno"> 783</span>  v = redfun(v, __shfl_down_sync(0xffffffff, v, 1));</div><div class="line"><a name="l00784"></a><span class="lineno"> 784</span>  <span class="keywordflow">return</span> mshadow::half::half_t(v);</div><div class="line"><a name="l00785"></a><span class="lineno"> 785</span> }</div><div class="line"><a name="l00786"></a><span class="lineno"> 
786</span> </div><div class="line"><a name="l00787"></a><span class="lineno"> 787</span> <span class="preprocessor">#endif // __CUDACC__</span></div><div class="line"><a name="l00788"></a><span class="lineno"> 788</span> </div><div class="line"><a name="l00789"></a><span class="lineno"> 789</span> <span class="preprocessor">#endif // MXNET_COMMON_CUDA_UTILS_H_</span></div><div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a404a5fd26328cf46170f6eb3424c9633"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a404a5fd26328cf46170f6eb3424c9633">mshadow::kFloat32</a></div><div class="ttdef"><b>Definition:</b> base.h:359</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a685d7ca3c9370ff471665abcacdeb381"><div class="ttname"><a href="cuda__utils_8h.html#a685d7ca3c9370ff471665abcacdeb381">CUBLAS_CALL</a></div><div class="ttdeci">#define CUBLAS_CALL(func)</div><div class="ttdoc">Protected cuBLAS call. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:110</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4_html_acb859823c71c7c2aeeb55de510dcb1b4"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#acb859823c71c7c2aeeb55de510dcb1b4">mxnet::common::cuda::CublasType< double >::zero</a></div><div class="ttdeci">static const double zero</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:219</div></div> |
| <div class="ttc" id="optional_8h_html"><div class="ttname"><a href="optional_8h.html">optional.h</a></div><div class="ttdoc">Container to hold optional data. </div></div> |
| <div class="ttc" id="cuda__utils_8h_html_aa79f548df23452162de37663f171e99d"><div class="ttname"><a href="cuda__utils_8h.html#aa79f548df23452162de37663f171e99d">ComputeCapabilityMajor</a></div><div class="ttdeci">int ComputeCapabilityMajor(int device_id)</div><div class="ttdoc">Determine major version number of the gpu&#39;s cuda compute architecture. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:462</div></div> |
| <div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html">mxnet::common::cuda::DeviceStore</a></div><div class="ttdef"><b>Definition:</b> cuda_utils.h:370</div></div> |
| <div class="ttc" id="classdmlc_1_1optional_html"><div class="ttname"><a href="classdmlc_1_1optional.html">dmlc::optional</a></div><div class="ttdoc">c++17 compatible optional class. </div><div class="ttdef"><b>Definition:</b> optional.h:43</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_ad77e70546b7f35ecba0098caa2d07523"><div class="ttname"><a href="cuda__utils_8h.html#ad77e70546b7f35ecba0098caa2d07523">GetEnvAllowTensorCoreConversion</a></div><div class="ttdeci">bool GetEnvAllowTensorCoreConversion()</div><div class="ttdoc">Returns global policy for TensorCore implicit type casting. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:583</div></div> |
| <div class="ttc" id="namespacemxnet_html"><div class="ttname"><a href="namespacemxnet.html">mxnet</a></div><div class="ttdoc">namespace of mxnet </div><div class="ttdef"><b>Definition:</b> api_registry.h:33</div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_aa7e0a8f7264c65d8000560d84d7fc54d"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#aa7e0a8f7264c65d8000560d84d7fc54d">mxnet::common::cuda::get_load_type</a></div><div class="ttdeci">int get_load_type(size_t N)</div><div class="ttdoc">Get the largest datatype suitable to read requested number of bytes. </div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a464dee13053e3b0b1006c6307069196c"><div class="ttname"><a href="cuda__utils_8h.html#a464dee13053e3b0b1006c6307069196c">GetEnvAllowTensorCore</a></div><div class="ttdeci">bool GetEnvAllowTensorCore()</div><div class="ttdoc">Returns global policy for TensorCore algo use. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:560</div></div> |
| <div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html_ad9878a09a93d4fcaf9d0639b3613d9f7"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#ad9878a09a93d4fcaf9d0639b3613d9f7">mxnet::common::cuda::DeviceStore::DeviceStore</a></div><div class="ttdeci">DeviceStore(int requested_device=-1, bool restore=true)</div><div class="ttdoc">default constructor- only optionally restores previous device </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:373</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a9779e3ad0efd0faec7fbe431c0db896d"><div class="ttname"><a href="cuda__utils_8h.html#a9779e3ad0efd0faec7fbe431c0db896d">SMArch</a></div><div class="ttdeci">int SMArch(int device_id)</div><div class="ttdoc">Return the integer SM architecture (e.g. Volta = 70). </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:484</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4_html_a237f23f560dad8c0299c11a14f1dee23"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01int32__t_01_4.html#a237f23f560dad8c0299c11a14f1dee23">mxnet::common::cuda::CublasType< int32_t >::ScaleType</a></div><div class="ttdeci">int32_t ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:247</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4_html_a98a73866e9513d63627f935531456ca7"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a98a73866e9513d63627f935531456ca7">mxnet::common::cuda::CublasType< float >::zero</a></div><div class="ttdeci">static const float zero</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:209</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4_html_a437fb574fefbe87d2add1289074b194a"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a437fb574fefbe87d2add1289074b194a">mxnet::common::cuda::CublasType< float >::one</a></div><div class="ttdeci">static const float one</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:208</div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a03888f252f813f6d052ae84bf8801498"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a03888f252f813f6d052ae84bf8801498">mxnet::common::cuda::CudaMin</a></div><div class="ttdeci">DType __device__ CudaMin(DType a, DType b)</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:366</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4_html_a17c4026782d6a86b7d11aae44b684969"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a17c4026782d6a86b7d11aae44b684969">mxnet::common::cuda::CublasType< double >::one</a></div><div class="ttdeci">static const double one</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:218</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_ac51c1cdc60e05dd857bfabca52355f2f"><div class="ttname"><a href="cuda__utils_8h.html#ac51c1cdc60e05dd857bfabca52355f2f">MultiprocessorCount</a></div><div class="ttdeci">int MultiprocessorCount(int device_id)</div><div class="ttdoc">Return the number of streaming multiprocessors of GPU device_id. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:495</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4_html_ae8222ef1a6cba23c5f196393d74c45ce"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#ae8222ef1a6cba23c5f196393d74c45ce">mxnet::common::cuda::CublasType< mshadow::half::half_t >::zero</a></div><div class="ttdeci">static const mshadow::half::half_t zero</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:229</div></div> |
| <div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html_a01163fd4915e74bdd81dd7305917f0e4"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a01163fd4915e74bdd81dd7305917f0e4">mxnet::common::cuda::DeviceStore::SetDevice</a></div><div class="ttdeci">void SetDevice(int device)</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:392</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_afb4268417c1d8886a39142c85c8f188f"><div class="ttname"><a href="cuda__utils_8h.html#afb4268417c1d8886a39142c85c8f188f">SupportsFloat16Compute</a></div><div class="ttdeci">bool SupportsFloat16Compute(int device_id)</div><div class="ttdoc">Determine whether a cuda-capable gpu&#39;s architecture supports float16 math. Assume not if device_id is...</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:530</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a7d0d1e932a096c498381cec82a650cfa"><div class="ttname"><a href="cuda__utils_8h.html#a7d0d1e932a096c498381cec82a650cfa">kMaxNumGpus</a></div><div class="ttdeci">constexpr size_t kMaxNumGpus</div><div class="ttdoc">Maximum number of GPUs. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:432</div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a6f3ee04eb382c57e10916108db3efd80"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a6f3ee04eb382c57e10916108db3efd80">mxnet::common::cuda::CudaMax</a></div><div class="ttdeci">DType __device__ CudaMax(DType a, DType b)</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:361</div></div> |
| <div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a1f5a1c62216cbd2200443d501924cf28"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1f5a1c62216cbd2200443d501924cf28">mshadow::kFloat64</a></div><div class="ttdef"><b>Definition:</b> base.h:360</div></div> |
| <div class="ttc" id="namespacemshadow_html_abb4c36a0703ec671a5e74b0a8d37a47a"><div class="ttname"><a href="namespacemshadow.html#abb4c36a0703ec671a5e74b0a8d37a47a">mshadow::SetDevice</a></div><div class="ttdeci">void SetDevice(int devid)</div><div class="ttdoc">set the device of current thread to work on </div></div> |
| <div class="ttc" id="cuda__utils_8h_html_aa16d34c218441b0d4074baa8c66a5521"><div class="ttname"><a href="cuda__utils_8h.html#aa16d34c218441b0d4074baa8c66a5521">MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT</a></div><div class="ttdeci">#define MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION_DEFAULT</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:578</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a31f4237a3ff5be2d420461a9baaffd1e"><div class="ttname"><a href="cuda__utils_8h.html#a31f4237a3ff5be2d420461a9baaffd1e">cudaAttributeLookup</a></div><div class="ttdeci">int cudaAttributeLookup(int device_id, std::vector< int32_t > *cached_values, cudaDeviceAttr attr, const char *attr_name)</div><div class="ttdoc">Return an attribute GPU device_id. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:445</div></div> |
| <div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a4fbb02e389c3126918b505cd01188368"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a4fbb02e389c3126918b505cd01188368">mshadow::kInt32</a></div><div class="ttdef"><b>Definition:</b> base.h:363</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_af7e22ce6d80d61e8ca37df23880ff1a9"><div class="ttname"><a href="cuda__utils_8h.html#af7e22ce6d80d61e8ca37df23880ff1a9">SupportsTensorCore</a></div><div class="ttdeci">bool SupportsTensorCore(int device_id)</div><div class="ttdoc">Determine whether a cuda-capable gpu&#39;s architecture supports Tensor Core math. Assume not if device_i...</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:547</div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_abf9bcb4cb696e9ae61b818510dac39c8"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#abf9bcb4cb696e9ae61b818510dac39c8">mxnet::common::cuda::CusolverGetErrorString</a></div><div class="ttdeci">const char * CusolverGetErrorString(cusolverStatus_t error)</div><div class="ttdoc">Get string representation of cuSOLVER errors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:299</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_aa7ba00b841d6b7ba443b0e58dac9ab88"><div class="ttname"><a href="cuda__utils_8h.html#aa7ba00b841d6b7ba443b0e58dac9ab88">MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT</a></div><div class="ttdeci">#define MXNET_CUDA_ALLOW_TENSOR_CORE_DEFAULT</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:554</div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a97c06b2f4d26445a7386b0f54fae1feb"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a97c06b2f4d26445a7386b0f54fae1feb">mxnet::common::cuda::CurandGetErrorString</a></div><div class="ttdeci">const char * CurandGetErrorString(curandStatus_t status)</div><div class="ttdoc">Get string representation of cuRAND errors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:328</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_af5b41c04e3d281500957c305532cd478"><div class="ttname"><a href="cuda__utils_8h.html#af5b41c04e3d281500957c305532cd478">MaxSharedMemoryPerMultiprocessor</a></div><div class="ttdeci">int MaxSharedMemoryPerMultiprocessor(int device_id)</div><div class="ttdoc">Return the shared memory size in bytes of each of the GPU&#39;s streaming multiprocessors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:506</div></div> |
| <div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a1a39d2f8230da3cb53528904c8a5fff0"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a1a39d2f8230da3cb53528904c8a5fff0">mshadow::kUint8</a></div><div class="ttdef"><b>Definition:</b> base.h:362</div></div> |
| <div class="ttc" id="classmxnet_1_1common_1_1cuda_1_1DeviceStore_html_a701d38ae493688ee2136995fe8611aa0"><div class="ttname"><a href="classmxnet_1_1common_1_1cuda_1_1DeviceStore.html#a701d38ae493688ee2136995fe8611aa0">mxnet::common::cuda::DeviceStore::~DeviceStore</a></div><div class="ttdeci">~DeviceStore()</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:384</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4_html_a707d99741473be6edc5f4c345690e9ee"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#a707d99741473be6edc5f4c345690e9ee">mxnet::common::cuda::CublasType< mshadow::half::half_t >::ScaleType</a></div><div class="ttdeci">float ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:227</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4_html_a3af01d0a12763530b9568e836c4655e0"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01uint8__t_01_4.html#a3af01d0a12763530b9568e836c4655e0">mxnet::common::cuda::CublasType< uint8_t >::ScaleType</a></div><div class="ttdeci">uint8_t ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:237</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_html"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType.html">mxnet::common::cuda::CublasType</a></div><div class="ttdoc">Converts between C++ datatypes and enums/constants needed by cuBLAS. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:193</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a7c16e8770e4f399cabed1fc231ffd9b6"><div class="ttname"><a href="cuda__utils_8h.html#a7c16e8770e4f399cabed1fc231ffd9b6">ComputeCapabilityMinor</a></div><div class="ttdeci">int ComputeCapabilityMinor(int device_id)</div><div class="ttdoc">Determine minor version number of the gpu&#39;s cuda compute architecture. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:473</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4_html_acf8d06465837aa6ee31e125e6eeda87c"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01mshadow_1_1half_1_1half__t_01_4.html#acf8d06465837aa6ee31e125e6eeda87c">mxnet::common::cuda::CublasType< mshadow::half::half_t >::one</a></div><div class="ttdeci">static const mshadow::half::half_t one</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:228</div></div> |
| <div class="ttc" id="namespacemshadow_html"><div class="ttname"><a href="namespacemshadow.html">mshadow</a></div><div class="ttdoc">overloaded + operator between half_t and bf16_t </div><div class="ttdef"><b>Definition:</b> base.h:334</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4_html_a735caccec4d080a0fd7e1bf88a727955"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01float_01_4.html#a735caccec4d080a0fd7e1bf88a727955">mxnet::common::cuda::CublasType< float >::ScaleType</a></div><div class="ttdeci">float ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:207</div></div> |
| <div class="ttc" id="namespacemshadow_1_1expr_html_afc62edfb800bb19e201b20b444831af3"><div class="ttname"><a href="namespacemshadow_1_1expr.html#afc62edfb800bb19e201b20b444831af3">mshadow::expr::transpose</a></div><div class="ttdeci">TransposeExExp< SrcExp, DType, ExpInfo< SrcExp >::kDim > transpose(const Exp< SrcExp, DType, etype > &src, Shape< ExpInfo< SrcExp >::kDim > axes)</div><div class="ttdoc">an expression that reshapes a tensor to another shape </div><div class="ttdef"><b>Definition:</b> transpose.h:76</div></div> |
| <div class="ttc" id="namespacemshadow_html_a936bbfe6aeead8902973c098b87f18c1a37ab9e42757689b17620f5728296d5d4"><div class="ttname"><a href="namespacemshadow.html#a936bbfe6aeead8902973c098b87f18c1a37ab9e42757689b17620f5728296d5d4">mshadow::kFloat16</a></div><div class="ttdef"><b>Definition:</b> base.h:361</div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a06cc7d24ca66505e69f5ad40009f5e8d"><div class="ttname"><a href="cuda__utils_8h.html#a06cc7d24ca66505e69f5ad40009f5e8d">CUDA_CALL</a></div><div class="ttdeci">#define CUDA_CALL(func)</div><div class="ttdoc">Protected CUDA call. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:97</div></div> |
| <div class="ttc" id="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4_html_a46da9bddaa921bd38ec1c90a975972fe"><div class="ttname"><a href="structmxnet_1_1common_1_1cuda_1_1CublasType_3_01double_01_4.html#a46da9bddaa921bd38ec1c90a975972fe">mxnet::common::cuda::CublasType< double >::ScaleType</a></div><div class="ttdeci">double ScaleType</div><div class="ttdef"><b>Definition:</b> cuda_utils.h:217</div></div> |
| <div class="ttc" id="3rdparty_2mshadow_2mshadow_2base_8h_html"><div class="ttname"><a href="3rdparty_2mshadow_2mshadow_2base_8h.html">base.h</a></div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a9feee613a4f16a954dd68e55345a72ac"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a9feee613a4f16a954dd68e55345a72ac">mxnet::common::cuda::CublasGetErrorString</a></div><div class="ttdeci">const char * CublasGetErrorString(cublasStatus_t error)</div><div class="ttdoc">Get string representation of cuBLAS errors. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:257</div></div> |
| <div class="ttc" id="namespacemxnet_1_1common_1_1cuda_html_a7608f1c1700694e453f37cfadfe9e30e"><div class="ttname"><a href="namespacemxnet_1_1common_1_1cuda.html#a7608f1c1700694e453f37cfadfe9e30e">mxnet::common::cuda::get_rows_per_block</a></div><div class="ttdeci">int get_rows_per_block(size_t row_size, int num_threads_per_block)</div><div class="ttdoc">Determine how many rows in a 2D matrix should a block of threads handle based on the row size and the...</div></div> |
| <div class="ttc" id="parameter_8h_html"><div class="ttname"><a href="parameter_8h.html">parameter.h</a></div><div class="ttdoc">Provide lightweight util to do parameter setup and checking. </div></div> |
| <div class="ttc" id="cuda__utils_8h_html_a82a24f3db4d0c91374cb3fe7d413f603"><div class="ttname"><a href="cuda__utils_8h.html#a82a24f3db4d0c91374cb3fe7d413f603">SupportsCooperativeLaunch</a></div><div class="ttdeci">bool SupportsCooperativeLaunch(int device_id)</div><div class="ttdoc">Return whether the GPU device_id supports cooperative-group kernel launching. </div><div class="ttdef"><b>Definition:</b> cuda_utils.h:518</div></div> |
| <div class="ttc" id="libinfo_8h_html"><div class="ttname"><a href="libinfo_8h.html">libinfo.h</a></div><div class="ttdoc">get features of the MXNet library at runtime </div></div> |
| </div><!-- fragment --></div><!-- contents --> |
| <!-- start footer part --> |
| <hr class="footer"/><address class="footer"><small> |
| Generated on Wed Jun 8 2022 23:30:29 for mxnet by  <a href="http://www.doxygen.org/index.html"> |
| <img class="footer" src="doxygen.png" alt="doxygen"/> |
| </a> 1.8.13 |
| </small></address> |
| </body> |
| </html> |