The onnxruntime sample code demonstrates wrapping the runtime in a struct.
The key parts of that code look like this:
struct onnx_struct {
    onnx_struct() {
        // Bind the pre-allocated input/output buffers to ORT tensors backed by CPU memory.
        auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
        input_tensor_ = Ort::Value::CreateTensor<float>(memory_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());
        output_tensor_ = Ort::Value::CreateTensor<float>(memory_info, results_.data(), results_.size(), output_shape_.data(), output_shape_.size());
    }

    std::array<float, 1000> Run() {
        const char* input_names[] = { "input" };
        const char* output_names[] = { "output" };
        session_.Run(Ort::RunOptions{ nullptr }, input_names, &input_tensor_, 1, output_names, &output_tensor_, 1);
        // result_ = std::distance(results_.begin(), std::max_element(results_.begin(), results_.end()));
        return results_;
    }

    static constexpr const int width_ = 128;
    static constexpr const int height_ = 128;
    std::array<float, width_ * height_> input_image_{};
    std::array<float, 1000> results_{};
    // int64_t result_{ 0 };

private:
    Ort::Env env;
    Ort::Session session_{ env, L"./data/test_onnx.onnx", Ort::SessionOptions{ nullptr } };
    Ort::Value input_tensor_{ nullptr };
    std::array<int64_t, 4> input_shape_{ 1, 1, width_, height_ };
    Ort::Value output_tensor_{ nullptr };
    std::array<int64_t, 2> output_shape_{ 1, 1000 };
};
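For reference, here is a minimal sketch of how this struct would be driven. It assumes the sample's ./data/test_onnx.onnx model actually exists and that its node names really are "input"/"output"; the max_element line needs &lt;algorithm&gt;.

// Minimal usage sketch for the sample struct above.
onnx_struct net;
net.input_image_.fill(0.5f);  // stand-in for real preprocessed pixel data
std::array<float, 1000> scores = net.Run();
// Index of the highest score (requires <algorithm>).
auto best = std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));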
Not only are the input and output sizes hardcoded, but the model is also loaded at struct construction time,
so it is pretty poor for real-world use.
So I am reworking it into a class that can be configured dynamically.
I plan to keep revising it as I need more features. (If anything is inefficient, or there is a better way, please let me know.)
class onnx_module
{
public:
    onnx_module(std::string sModelPath, int nInputC, int nInputWidth, int nInputHeight, int nOutputDims);
    onnx_module(std::string sModelPath, int nInputC, int nInputWidth, int nInputHeight, int nOutputC, int nOutputWidth, int nOutputHeight);
    ~onnx_module();  // releases the heap-allocated session (was leaking before)
    void Run(std::vector<float>& vResults);

    std::vector<float> results_;
    std::vector<float> input_image_;

private:
    Ort::Env env;
    Ort::Session* session_ = nullptr;
    Ort::Value input_tensor_{ nullptr };
    std::vector<int64_t> input_shape_;
    Ort::Value output_tensor_{ nullptr };
    std::vector<int64_t> output_shape_;
};
onnx_module::~onnx_module()
{
    delete session_;
}

onnx_module::onnx_module(std::string sModelPath, int nInputC, int nInputWidth, int nInputHeight, int nOutputDims)
{
    // Ort::Session expects a wide-character path on Windows; this naive
    // char-to-wchar_t widening is fine for ASCII-only paths.
    std::wstring wPath(sModelPath.begin(), sModelPath.end());
    session_ = new Ort::Session(env, wPath.c_str(), Ort::SessionOptions{ nullptr });

    const int batch_ = 1;
    input_image_.assign(nInputWidth * nInputHeight * nInputC, 0.0f);
    results_.assign(nOutputDims, 0.0f);

    // NCHW layout: { batch, channel, height, width } -- note height before width.
    input_shape_ = { batch_, nInputC, nInputHeight, nInputWidth };
    output_shape_ = { batch_, nOutputDims };

    // Bind the vectors' storage to ORT tensors; the buffers must outlive the tensors.
    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    input_tensor_ = Ort::Value::CreateTensor<float>(memory_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());
    output_tensor_ = Ort::Value::CreateTensor<float>(memory_info, results_.data(), results_.size(), output_shape_.data(), output_shape_.size());
}
onnx_module::onnx_module(std::string sModelPath, int nInputC, int nInputWidth, int nInputHeight, int nOutputC, int nOutputWidth, int nOutputHeight)
{
    std::wstring wPath(sModelPath.begin(), sModelPath.end());
    session_ = new Ort::Session(env, wPath.c_str(), Ort::SessionOptions{ nullptr });

    const int batch_ = 1;
    input_image_.assign(nInputWidth * nInputHeight * nInputC, 0.0f);
    results_.assign(nOutputWidth * nOutputHeight * nOutputC, 0.0f);

    // NCHW layout for both input and output: { batch, channel, height, width }.
    input_shape_ = { batch_, nInputC, nInputHeight, nInputWidth };
    output_shape_ = { batch_, nOutputC, nOutputHeight, nOutputWidth };

    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    input_tensor_ = Ort::Value::CreateTensor<float>(memory_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());
    output_tensor_ = Ort::Value::CreateTensor<float>(memory_info, results_.data(), results_.size(), output_shape_.data(), output_shape_.size());
}
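The two constructors duplicate the entire tensor-setup sequence, so one option I am considering is pulling it into a private helper both can delegate to. A rough sketch only; init_tensors_ is a hypothetical name and not a member of the class above.

// Hypothetical refactor: both constructors could forward to one helper
// after building their shape vectors. Not part of the current class.
void onnx_module::init_tensors_(const std::vector<int64_t>& in_shape, const std::vector<int64_t>& out_shape)
{
    input_shape_ = in_shape;
    output_shape_ = out_shape;

    // Element counts are the products of the shape dimensions.
    size_t in_count = 1, out_count = 1;
    for (int64_t d : in_shape)  in_count  *= static_cast<size_t>(d);
    for (int64_t d : out_shape) out_count *= static_cast<size_t>(d);
    input_image_.assign(in_count, 0.0f);
    results_.assign(out_count, 0.0f);

    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    input_tensor_ = Ort::Value::CreateTensor<float>(memory_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());
    output_tensor_ = Ort::Value::CreateTensor<float>(memory_info, results_.data(), results_.size(), output_shape_.data(), output_shape_.size());
}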
void onnx_module::Run(std::vector<float>& vResults)
{
    // Node names are still hardcoded; they must match the names the model was
    // exported with (e.g. the input_names/output_names passed to torch.onnx.export).
    const char* input_names[] = { "input" };
    const char* output_names[] = { "output" };
    session_->Run(Ort::RunOptions{ nullptr }, input_names, &input_tensor_, 1, output_names, &output_tensor_, 1);
    vResults.assign(results_.begin(), results_.end());
}
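And a minimal usage sketch of the class in its current form. The model path and the 1x128x128-in / 1000-out dimensions are placeholder assumptions; std::fill and max_element need &lt;algorithm&gt;.

// Hypothetical usage: classification model, 1x1x128x128 input, 1000 scores out.
onnx_module net("./data/test_onnx.onnx", 1, 128, 128, 1000);
// Fill the preallocated input buffer with (here, dummy) preprocessed pixel data.
std::fill(net.input_image_.begin(), net.input_image_.end(), 0.5f);
std::vector<float> scores;
net.Run(scores);
auto best = std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));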
There is still a lot left to fix... this is hard, haha.