I am attempting to perform inference using TVM in C++.
The last line in the `main` function (the call through the pointer returned by `Registry::Get`) results in a segmentation fault. Everything before it appears to work fine.
Any help would be appreciated.
Thanks in advance.
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <string>

#include <dlpack/dlpack.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
int main()
{
// tvm module for compiled functions
tvm::runtime::Module mod_syslib = tvm::runtime::Module::LoadFromFile("./deploy_lib.so");
// json graph
//std::ifstream json_in("model/model_graph.json", std::ios::in);
std::ifstream json_in("deploy_graph.json", std::ios::in);
std::string json_data((std::istreambuf_iterator<char>(json_in)), std::istreambuf_iterator<char>());
json_in.close();
// parameters in binary
//std::ifstream params_in("model/model_graph.params", std::ios::binary);
std::ifstream params_in("deploy_params.params", std::ios::binary);
std::string params_data((std::istreambuf_iterator<char>(params_in)), std::istreambuf_iterator<char>());
params_in.close();
// parameters need to be TVMByteArray type to indicate the binary data
TVMByteArray params_arr;
params_arr.data = params_data.c_str();
params_arr.size = params_data.length();
int dtype_code = kDLFloat;
int dtype_bits = 32;
int dtype_lanes = 1;
int device_cpu = kDLCPU;
//int device_gpu = kDLGPU;
int device_cpu_id = 0;
//int device_gpu_id = 0;
// get global function module for graph runtime
tvm::runtime::Module mod = (*tvm::runtime::Registry::Get("tvm.graph_runtime.create"))(json_data, mod_syslib, device_cpu, device_cpu_id);
return 0;
}