Hi, I tried the following to import a simple torch.nn.Linear model into Relay:
```python
import tvm
from tvm import relay
import torch
# Create PyTorch eager model
in_features = 300
out_features = 100
m = torch.nn.Linear(in_features, out_features)
# Create PyTorch JIT-traced model
batch_size = 10
shape_list = [("input0", (batch_size, in_features))]
inp = torch.randn(shape_list[0][1])  # renamed to avoid shadowing the built-in `input`
sm = torch.jit.trace(m, inp)
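# (Optional) Inspect the traced graph to see which aten ops the Relay
# frontend has to convert -- e.g. whether the trace records aten::linear
# or aten::matmul + aten::add for the Linear layer
print(sm.graph)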
# Set up TVM config
target = tvm.target.Target("cuda")
dtype = "float32"
# Import the PyTorch graph to Relay
mod, params = relay.frontend.from_pytorch(sm, shape_list)
```
This gives:
```
Traceback (most recent call last):
File "test_tvm_auto_scheduler.py", line 22, in <module>
mod, params = relay.frontend.from_pytorch(sm, shape_list)
File "/home/willfeng/tvm/python/tvm/relay/frontend/pytorch.py", line 3329, in from_pytorch
ret = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)[0]
File "/home/willfeng/tvm/python/tvm/relay/frontend/pytorch.py", line 2753, in convert_operators
self.record_output_type(relay_out)
File "/home/willfeng/tvm/python/tvm/relay/frontend/pytorch.py", line 219, in record_output_type
self.infer_type_with_prelude(output)
File "/home/willfeng/tvm/python/tvm/relay/frontend/pytorch.py", line 167, in infer_type_with_prelude
body = self.infer_type(val, self.prelude.mod)
File "/home/willfeng/tvm/python/tvm/relay/frontend/pytorch.py", line 160, in infer_type
new_mod = transform.InferType()(new_mod)
File "/home/willfeng/tvm/python/tvm/ir/transform.py", line 161, in __call__
return _ffi_transform_api.RunPass(self, mod)
File "/home/willfeng/tvm/python/tvm/_ffi/_ctypes/packed_func.py", line 237, in __call__
raise get_last_ffi_error()
tvm._ffi.base.TVMError: Traceback (most recent call last):
7: TVMFuncCall
6: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), void tvm::runtime::TypedPackedFunc<tvm::IRModule (tvm::transform::Pass, tvm::IRModule)>::AssignTypedLambda<tvm::transform::{lambda(tvm::transform::Pass, tvm::IRModule)#7}>(tvm::transform::{lambda(tvm::transform::Pass, tvm::IRModule)#7}, std::string)::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
5: tvm::transform::Pass::operator()(tvm::IRModule) const
4: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
3: tvm::transform::ModulePassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
2: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), void tvm::runtime::TypedPackedFunc<tvm::IRModule (tvm::IRModule, tvm::transform::PassContext)>::AssignTypedLambda<tvm::relay::transform::InferType()::{lambda(tvm::IRModule, tvm::transform::PassContext const&)#1}>(tvm::relay::transform::InferType()::{lambda(tvm::IRModule, tvm::transform::PassContext const&)#1})::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
1: tvm::relay::TypeInferencer::Infer(tvm::GlobalVar, tvm::relay::Function)
0: tvm::relay::TypeSolver::Solve()
9: TVMFuncCall
8: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), void tvm::runtime::TypedPackedFunc<tvm::IRModule (tvm::transform::Pass, tvm::IRModule)>::AssignTypedLambda<tvm::transform::{lambda(tvm::transform::Pass, tvm::IRModule)#7}>(tvm::transform::{lambda(tvm::transform::Pass, tvm::IRModule)#7}, std::string)::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
7: tvm::transform::Pass::operator()(tvm::IRModule) const
6: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
5: tvm::transform::ModulePassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
4: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), void tvm::runtime::TypedPackedFunc<tvm::IRModule (tvm::IRModule, tvm::transform::PassContext)>::AssignTypedLambda<tvm::relay::transform::InferType()::{lambda(tvm::IRModule, tvm::transform::PassContext const&)#1}>(tvm::relay::transform::InferType()::{lambda(tvm::IRModule, tvm::transform::PassContext const&)#1})::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
3: tvm::relay::TypeInferencer::Infer(tvm::GlobalVar, tvm::relay::Function)
2: tvm::relay::TypeSolver::Solve()
1: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), void tvm::runtime::TypedPackedFunc<bool (tvm::runtime::Array<tvm::Type, void> const&, int, tvm::Attrs const&, tvm::TypeReporter const&)>::AssignTypedLambda<bool (*)(tvm::runtime::Array<tvm::Type, void> const&, int, tvm::Attrs const&, tvm::TypeReporter const&)>(bool (*)(tvm::runtime::Array<tvm::Type, void> const&, int, tvm::Attrs const&, tvm::TypeReporter const&))::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
0: bool tvm::relay::MatmulRel<tvm::relay::DenseAttrs>(tvm::runtime::Array<tvm::Type, void> const&, int, tvm::Attrs const&, tvm::TypeReporter const&)
File "/home/willfeng/tvm/src/relay/analysis/type_solver.cc", line 624
TVMError:
---------------------------------------------------------------
An error occurred during the execution of TVM.
For more information, please see: https://tvm.apache.org/docs/errors.html
---------------------------------------------------------------
Check failed: (false) is false: [23:55:13] /home/willfeng/tvm/src/relay/op/nn/nn.h:100:
---------------------------------------------------------------
An error occurred during the execution of TVM.
For more information, please see: https://tvm.apache.org/docs/errors.html
---------------------------------------------------------------
Check failed: ((transpose_b && reporter->AssertEQ(reduce, tensor_b->shape[1])) || (!transpose_b && reporter->AssertEQ(reduce, tensor_b->shape[0]))) is false: MatmulRel: input dimension doesn't match, tensor_a shape=[10, 300], tensor_b shape=[300, 100]
with the error
Check failed: ((transpose_b && reporter->AssertEQ(reduce, tensor_b->shape[1])) || (!transpose_b && reporter->AssertEQ(reduce, tensor_b->shape[0]))) is false: MatmulRel: input dimension doesn't match, tensor_a shape=[10, 300], tensor_b shape=[300, 100]
```
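For reference, my understanding is that relay.nn.dense expects the weight (tensor_b) in (out_features, in_features) layout and computes Y = X * W^T, so with tensor_a of shape [10, 300] the weight should be [100, 300], not the [300, 100] shown in the error. A minimal standalone sketch (not part of the script above) that type-checks with that layout:

```python
import tvm
from tvm import relay

# Weight laid out as (out_features, in_features), which is also how
# torch.nn.Linear stores it; nn.dense computes Y = X * W^T.
x = relay.var("x", shape=(10, 300), dtype="float32")
w = relay.var("w", shape=(100, 300), dtype="float32")
y = relay.nn.dense(x, w)

mod = tvm.IRModule.from_expr(relay.Function([x, w], y))
mod = relay.transform.InferType()(mod)  # succeeds with this weight layout
print(mod)
```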
Wondering if there is something obvious that I should fix? Thanks!