Hello, I have the following pytorch code:
import torch
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
class PytorchModel(torch.nn.Module):
def __init__(self):
super(PytorchModel, self).__init__()
self.one = torch.ones(1, dtype=torch.int32)
self.one = torch.as_strided(self.one, (10 * 10,), (0,))
self.zero = torch.zeros(1, dtype=torch.int32)
self.zero = torch.as_strided(self.zero, (10 * 10,), (0,))
def forward(self, vector1, vector2):
resized_vector1 = vector1.view(1, -1)
resized_vector2 = vector2.view(-1, 1)
count = torch.where((resized_vector1 ==
resized_vector2).view(-1), self.one, self.zero).sum()
return count
vector1 = torch.zeros([10], dtype=torch.int32)
vector2 = torch.zeros([10], dtype=torch.int32)
for i in range(10):
vector1[i] = i
vector2[i] = i
init_model = PytorchModel()
scripted_model = torch.jit.script(init_model)
matches = scripted_model(vector1, vector2)
input1_name = 'input0'
input2_name = 'input1'
shape_list = [(input1_name, (10,)),
(input2_name, (10,))]
target = 'llvm'
ctx = tvm.cpu()
vector1_tvm = tvm.nd.array(vector1, ctx)
vector2_tvm = tvm.nd.array(vector2, ctx)
target = 'llvm'
ctx = tvm.cpu()
model, params = relay.frontend.from_pytorch(scripted_model, shape_list,
default_dtype="int32")
with tvm.transform.PassContext(opt_level=3):
executor = relay.create_executor("vm", mod=model, ctx=ctx, target=target)
tvm_model = executor.evaluate()
matches = tvm_model(input0=vector1_tvm, input1=vector2_tvm)
My llibrary versions are:
torch==1.6.0
tvm==0.7.dev1
When I am running the code I am receiving the following error:
WARNING:root:Untyped Tensor found, assume it is int32
WARNING:root:Untyped Tensor found, assume it is int32
WARNING:root:Untyped Tensor found, assume it is int32
WARNING:root:Untyped Tensor found, assume it is int32
WARNING:root:Untyped Tensor found, assume it is int32
WARNING:root:Untyped Tensor found, assume it is int32
WARNING:root:Untyped Tensor found, assume it is int32
Traceback (most recent call last):
File "reproduce_bug.py", line 51, in <module>
matches = tvm_model(input0=vector1_tvm, input1=vector2_tvm)
File "/home/dkoutsou/env/lib/python3.6/site-packages/tvm-0.7.dev1-py3.6-linux-x86_64.egg/tvm/relay/backend/vm.py", line 264, in _vm_wrapper
args = self._convert_args(main, args, kwargs)
File "/home/dkoutsou/env/lib/python3.6/site-packages/tvm-0.7.dev1-py3.6-linux-x86_64.egg/tvm/relay/backend/interpreter.py", line 120, in _convert_args
cargs.append(kwargs[name])
KeyError: 'one'
Do you have any idea how I should debug this? In the beginning I though that the problem is the naming of the variables but renaming self.one
to something else didn’t help. The same code works fine with vanilla TVM but with TVM VM it breaks.
Thanks very much for any help!