The model inference results in C++ are all NAN. when I load the same model in python, I get the right output. Here is my C++ code.
arma::vec Complete_NN_potential::Sym_potentials(vec &input)
{
LOG(INFO) << "Running graph executor...";
DLDevice dev{kDLCPU, 1};
tvm::runtime::Module mod_factory = tvm::runtime::Module::LoadFromFile("parameters/CH4Cl.so");
// create the graph executor module
tvm::runtime::Module gmod = mod_factory.GetFunction("default")(dev);
tvm::runtime::PackedFunc set_input = gmod.GetFunction("set_input");
tvm::runtime::PackedFunc get_output = gmod.GetFunction("get_output");
tvm::runtime::PackedFunc run = gmod.GetFunction("run");
// Use the C++ API
tvm::runtime::NDArray x = tvm::runtime::NDArray::Empty({1, 12}, DLDataType{kDLFloat, 64, 1}, dev);
tvm::runtime::NDArray y = tvm::runtime::NDArray::Empty({1, 8}, DLDataType{kDLFloat, 64, 1}, dev);
for (int i = 0; i < 12; ++i)
{
static_cast<double *>(x->data)[i] = input(i);
}
std::cout << "x:" << std::endl;
// ICHECK_EQ(static_cast<double*>(y->data)[0],8.0642538928751464e-02);
for (int i = 0; i < 8; ++i)
{
std::cout << static_cast<double *>(x->data)[i] << std::endl;
}
// set the right input
set_input("x", x);
// run the code
run();
// get the output
get_output(0, y);
std::cout << "y:" << std::endl;
// ICHECK_EQ(static_cast<double*>(y->data)[0],8.0642538928751464e-02);
for (int i = 0; i < 8; ++i)
{
std::cout << static_cast<double *>(y->data)[i] << std::endl;
}
// const double tmp[12] = {
// input(0), input(1), input(2),
// input(3), input(4), input(5),
// input(6), input(7), input(8),
// input(9), input(10), input(11),
// };
vec pot = {
static_cast<double *>(y->data)[0], static_cast<double *>(y->data)[1], static_cast<double *>(y->data)[2],
static_cast<double *>(y->data)[3], static_cast<double *>(y->data)[4], static_cast<double *>(y->data)[5],
static_cast<double *>(y->data)[6], static_cast<double *>(y->data)[7]};
return pot;
}
I would be grateful if someone could help me.