#include <onnxruntime_cxx_api.h>

// ...

// Prefix the property name with the ONNX model's domain and graph name, when set.
if (!domain.empty()) {
  prop_name << domain << '.';
}
// ...
if (!graph_name.empty()) {
  prop_name << graph_name << '.';
}
// ...
return prop_name.str();
for (auto i = shape.begin(); i != shape.end() - 1; ++i) {
  stream << *i << " x ";
}
stream << shape.back();
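// A self-contained sketch of the same "d1 x d2 x ... x dn" formatting idiom shown above;
// the helper name formatShape and the std::vector<int64_t> parameter are illustrative
// assumptions, not the plugin's actual interface.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::string formatShape(const std::vector<int64_t>& shape) {
  std::ostringstream stream;
  if (shape.empty()) {
    return stream.str();  // guard: the loop above assumes a non-empty shape
  }
  for (auto i = shape.begin(); i != shape.end() - 1; ++i) {
    stream << *i << " x ";
  }
  stream << shape.back();
  return stream.str();
}

int main() {
  std::cout << formatShape({1, 3, 64, 64}) << std::endl;  // prints "1 x 3 x 64 x 64"
  return 0;
}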
if (property_id == PropertyId::create<OnnxProperty>()) {
  return std::make_shared<OnnxSourceTask>(m_models);
}
// Load every configured ONNX model and cache its session together with the
// input/output tensor metadata.
auto allocator = Ort::AllocatorWithDefaultOptions();
// ...
const auto& models = onnx_config.getModels();
// ...
for (auto model_path : models) {
  // ...
  model_info.m_session = Euclid::make_unique<Ort::Session>(ORT_ENV, model_path.c_str(),
                                                           Ort::SessionOptions{nullptr});
  // ...
  if (model_info.m_session->GetInputCount() != 1) {
    // ... (single-input check; body elided in this excerpt)
  }
  if (model_info.m_session->GetOutputCount() != 1) {
    throw Elements::Exception() << "Only ONNX models with a single output tensor are supported";
  }
  // ...
  auto input_type  = model_info.m_session->GetInputTypeInfo(0);
  auto output_type = model_info.m_session->GetOutputTypeInfo(0);

  model_info.m_input_shape  = input_type.GetTensorTypeAndShapeInfo().GetShape();
  model_info.m_input_type   = input_type.GetTensorTypeAndShapeInfo().GetElementType();
  model_info.m_output_shape = output_type.GetTensorTypeAndShapeInfo().GetShape();
  model_info.m_output_type  = output_type.GetTensorTypeAndShapeInfo().GetElementType();

  if (model_info.m_input_type != ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT) {
    // ... (only float input tensors are accepted; body elided in this excerpt)
  }
  // ...
}
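// The excerpt above only constructs the sessions and caches tensor metadata. The sketch
// below is a minimal, self-contained illustration of how such a session is typically
// evaluated with the ONNX Runtime C++ API; it is not part of the plugin. The environment
// name, model path, dummy all-zero input, float output type, and the use of
// GetInputNameAllocated/GetOutputNameAllocated (available in recent ONNX Runtime
// releases; older ones expose GetInputName/GetOutputName) are assumptions made for the
// example. The sketch also assumes a non-Windows build, where ORTCHAR_T is char.
#include <onnxruntime_cxx_api.h>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "onnx_sketch");
  Ort::Session session(env, "model.onnx", Ort::SessionOptions{nullptr});
  Ort::AllocatorWithDefaultOptions allocator;

  // Mirror the single-input/single-output assumption from the excerpt.
  Ort::TypeInfo input_type = session.GetInputTypeInfo(0);
  std::vector<int64_t> shape = input_type.GetTensorTypeAndShapeInfo().GetShape();
  for (auto& dim : shape) {
    if (dim < 0) dim = 1;  // substitute 1 for dynamic dimensions (e.g. the batch size)
  }

  size_t n_elements = 1;
  for (auto dim : shape) n_elements *= static_cast<size_t>(dim);
  std::vector<float> input_data(n_elements, 0.f);  // dummy all-zero input tensor

  auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
      memory_info, input_data.data(), input_data.size(), shape.data(), shape.size());

  auto input_name  = session.GetInputNameAllocated(0, allocator);
  auto output_name = session.GetOutputNameAllocated(0, allocator);
  const char* input_names[]  = {input_name.get()};
  const char* output_names[] = {output_name.get()};

  auto outputs = session.Run(Ort::RunOptions{nullptr},
                             input_names, &input_tensor, 1,
                             output_names, 1);

  // Assumes a float output tensor with at least one element.
  const float* result = outputs.front().GetTensorData<float>();
  std::cout << "first output value: " << result[0] << std::endl;
  return 0;
}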
return prop.getData<T>(key);
// Register a column converter matching each model's output element type.
for (const auto& model : m_models) {
  switch (model.m_output_type) {
  case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
    registerColumnConverter<float>(registry, model);
    break;
  case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
    registerColumnConverter<int32_t>(registry, model);
    break;
  // ... (remaining cases and default handling elided in this excerpt)
  }
}