/*!
 * Copyright (c) 2016 by Contributors
 * \file caffe_blob.cc
 * \brief Implementations of SetDataGradToBlob for the supported device/type combinations
 * \author Haoran Wang
 */
#include "caffe_blob.h"
namespace mxnet {
namespace op {
namespace caffe {
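// SetDataGradToBlob binds MXNet-owned memory to a Caffe blob in place:
// for Data the blob's data pointer is redirected, otherwise its gradient
// (diff) pointer is set through the MXCAFFEBLOB helper. The template is
// specialized per device (cpu/gpu) and element type (float/double).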
template<>
void SetDataGradToBlob<mshadow::cpu, float>(caffeMemoryTypes memType,
                                            std::vector<::caffe::Blob<float>*>::iterator blob,
                                            std::vector<TBlob>::const_iterator itr) {
  float *data_ptr = reinterpret_cast<float*>((*itr).dptr_);
  if (memType == Data)
    (*blob)->set_cpu_data(data_ptr);
  else
    MXCAFFEBLOB(*blob, float)->set_cpu_diff(data_ptr);
}

template<>
void SetDataGradToBlob<mshadow::cpu, double>(caffeMemoryTypes memType,
                                             std::vector<::caffe::Blob<double>*>::iterator blob,
                                             std::vector<TBlob>::const_iterator itr) {
  double *data_ptr = reinterpret_cast<double*>((*itr).dptr_);
  if (memType == Data)
    (*blob)->set_cpu_data(data_ptr);
  else
    MXCAFFEBLOB(*blob, double)->set_cpu_diff(data_ptr);
}

template<>
void SetDataGradToBlob<mshadow::gpu, float>(caffeMemoryTypes memType,
                                            std::vector<::caffe::Blob<float>*>::iterator blob,
                                            std::vector<TBlob>::const_iterator itr) {
  float *data_ptr = reinterpret_cast<float*>((*itr).dptr_);
  if (memType == Data)
    (*blob)->set_gpu_data(data_ptr);
  else
    MXCAFFEBLOB(*blob, float)->set_gpu_diff(data_ptr);
}

template<>
void SetDataGradToBlob<mshadow::gpu, double>(caffeMemoryTypes memType,
                                             std::vector<::caffe::Blob<double>*>::iterator blob,
                                             std::vector<TBlob>::const_iterator itr) {
  double *data_ptr = reinterpret_cast<double*>((*itr).dptr_);
  if (memType == Data)
    (*blob)->set_gpu_data(data_ptr);
  else
    MXCAFFEBLOB(*blob, double)->set_gpu_diff(data_ptr);
}
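
// Convert a Caffe shape vector into an MXNet TShape. Caffe encodes a
// scalar as a 0-dim blob, which is mapped to shape {1} here.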
TShape Vector2TShape(const std::vector<int> &vec_int) {
  std::vector<mshadow::index_t> vec;
  for (uint32_t i = 0; i < vec_int.size(); ++i)
    vec.push_back(vec_int[i]);
  // 0-dim represents scalar in caffe
  if (vec_int.size() == 0)
    vec.push_back(1);
  return {vec.begin(), vec.end()};
}
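
// Convert an MXNet TShape back into the int vector form used by Caffe.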
std::vector<int> TShape2Vector(const TShape &tshape) {
  std::vector<int> s;
  for (uint32_t i = 0; i < tshape.ndim(); ++i)
    s.push_back(tshape[i]);
  return s;
}
} // namespace caffe
} // namespace op
} // namespace mxnet