import torch
import torch.nn as nn
import torch.nn.functional as F
import os

# Make the LLVM DLLs discoverable before TVM is imported (Windows setup).
# Fix: use the canonical "PATH" key and a None-safe lookup — os.getenv()
# returns None when the variable is unset, which would make the "+"
# concatenation raise TypeError.
os.environ["PATH"] = os.environ.get("PATH", "") + ";" + "C:/Program Files/LLVM/bin"
import sys

# Machine-specific location of a source checkout of TVM's Python package.
sys.path.append("D:/workspace/tvm/python")
import tvm
class Up(nn.Module):
    """Upscaling then double conv"""

    def __init__(self):
        super().__init__()

    def forward(self, x1, x2):
        # Inputs are NCHW; grow x1 spatially so it matches x2, then
        # stack the two feature maps along the channel axis.
        _, _, h1, w1 = x1.shape
        _, _, h2, w2 = x2.shape
        dh, dw = h2 - h1, w2 - w1
        # F.pad order is (left, right, top, bottom); odd differences put
        # the extra row/column on the right/bottom side.
        padding = [dw // 2, dw - dw // 2, dh // 2, dh - dh // 2]
        aligned = F.pad(x1, padding)
        return torch.cat([x2, aligned], dim=1)
if __name__ == "__main__":
    # Two feature maps whose heights differ by one, which exercises the
    # padding branch inside Up.forward before the concatenation.
    small = torch.randn(1, 32, 12, 14)
    large = torch.randn(1, 32, 13, 14)
    up = Up()
    _ = up(small, large)

    traced_model = torch.jit.trace(up, (small, large))

    # Build the (name, (shape, dtype)) descriptors TVM's PyTorch frontend
    # expects; graph input 0 is the module itself, so it is skipped.
    shape_list = []
    for graph_input in list(traced_model.graph.inputs())[1:]:
        input_name = graph_input.debugName().split('.')[0]
        input_shape = graph_input.type().sizes()
        input_dtype = str(graph_input.type().dtype()).split('.')[1]
        shape_list.append((input_name, (input_shape, input_dtype)))

    mod, params = tvm.relay.frontend.pytorch.from_pytorch(traced_model, shape_list)
I am stuck on an error that occurs when I convert a PyTorch model containing a concatenate op to Relay. I extracted the relevant part of the model code and formed the simple test script shown above.
1 Like