Annotation error with Vitis-AI in TVM

@jtuyls Thanks for your esteemed reply. Here are the outputs of mod[“main”].

Due to the comment word limit, I had to remove lines %200 through %400 from the output below, FYI. For the full details, please refer to the complete output here (2 comments): AssertionError while annotating deeplabv3 model from pytorch · Issue #33 · Xilinx/pyxir · GitHub

%0 = nn.conv2d(%data, %model.backbone.conv1.weight, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%1 = nn.batch_norm(%0, %model.backbone.bn1.weight, %model.backbone.bn1.bias, %model.backbone.bn1.running_mean, %model.backbone.bn1.running_var) /* ty=(Tensor[(1, 64, 112, 112), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%2 = %1.0;
%3 = nn.relu(%2) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%4 = nn.max_pool2d(%3, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%5 = nn.conv2d(%4, %model.backbone.layer1.0.conv1.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%6 = nn.batch_norm(%5, %model.backbone.layer1.0.bn1.weight, %model.backbone.layer1.0.bn1.bias, %model.backbone.layer1.0.bn1.running_mean, %model.backbone.layer1.0.bn1.running_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%7 = %6.0;
%8 = nn.relu(%7) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%9 = nn.conv2d(%8, %model.backbone.layer1.0.conv2.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%10 = nn.batch_norm(%9, %model.backbone.layer1.0.bn2.weight, %model.backbone.layer1.0.bn2.bias, %model.backbone.layer1.0.bn2.running_mean, %model.backbone.layer1.0.bn2.running_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%11 = %10.0;
%12 = nn.relu(%11) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%13 = nn.conv2d(%12, %model.backbone.layer1.0.conv3.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%14 = nn.batch_norm(%13, %model.backbone.layer1.0.bn3.weight, %model.backbone.layer1.0.bn3.bias, %model.backbone.layer1.0.bn3.running_mean, %model.backbone.layer1.0.bn3.running_var) /* ty=(Tensor[(1, 256, 56, 56), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%15 = %14.0;
%16 = nn.conv2d(%4, %model.backbone.layer1.0.downsample.0.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%17 = nn.batch_norm(%16, %model.backbone.layer1.0.downsample.1.weight, %model.backbone.layer1.0.downsample.1.bias, %model.backbone.layer1.0.downsample.1.running_mean, %model.backbone.layer1.0.downsample.1.running_var) /* ty=(Tensor[(1, 256, 56, 56), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%18 = %17.0;
%19 = add(%15, %18) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%20 = nn.relu(%19) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%21 = nn.conv2d(%20, %model.backbone.layer1.1.conv1.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%22 = nn.batch_norm(%21, %model.backbone.layer1.1.bn1.weight, %model.backbone.layer1.1.bn1.bias, %model.backbone.layer1.1.bn1.running_mean, %model.backbone.layer1.1.bn1.running_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%23 = %22.0;
%24 = nn.relu(%23) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%25 = nn.conv2d(%24, %model.backbone.layer1.1.conv2.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%26 = nn.batch_norm(%25, %model.backbone.layer1.1.bn2.weight, %model.backbone.layer1.1.bn2.bias, %model.backbone.layer1.1.bn2.running_mean, %model.backbone.layer1.1.bn2.running_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%27 = %26.0;
%28 = nn.relu(%27) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%29 = nn.conv2d(%28, %model.backbone.layer1.1.conv3.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%30 = nn.batch_norm(%29, %model.backbone.layer1.1.bn3.weight, %model.backbone.layer1.1.bn3.bias, %model.backbone.layer1.1.bn3.running_mean, %model.backbone.layer1.1.bn3.running_var) /* ty=(Tensor[(1, 256, 56, 56), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%31 = %30.0;
%32 = add(%31, %20) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%33 = nn.relu(%32) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%34 = nn.conv2d(%33, %model.backbone.layer1.2.conv1.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%35 = nn.batch_norm(%34, %model.backbone.layer1.2.bn1.weight, %model.backbone.layer1.2.bn1.bias, %model.backbone.layer1.2.bn1.running_mean, %model.backbone.layer1.2.bn1.running_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%36 = %35.0;
%37 = nn.relu(%36) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%38 = nn.conv2d(%37, %model.backbone.layer1.2.conv2.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%39 = nn.batch_norm(%38, %model.backbone.layer1.2.bn2.weight, %model.backbone.layer1.2.bn2.bias, %model.backbone.layer1.2.bn2.running_mean, %model.backbone.layer1.2.bn2.running_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) */;
%40 = %39.0;
%41 = nn.relu(%40) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%42 = nn.conv2d(%41, %model.backbone.layer1.2.conv3.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%43 = nn.batch_norm(%42, %model.backbone.layer1.2.bn3.weight, %model.backbone.layer1.2.bn3.bias, %model.backbone.layer1.2.bn3.running_mean, %model.backbone.layer1.2.bn3.running_var) /* ty=(Tensor[(1, 256, 56, 56), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%44 = %43.0;
%45 = add(%44, %33) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%46 = nn.relu(%45) /* ty=Tensor[(1, 256, 56, 56), float32] */;
%47 = nn.conv2d(%46, %model.backbone.layer2.0.conv1.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 56, 56), float32] */;
%48 = nn.batch_norm(%47, %model.backbone.layer2.0.bn1.weight, %model.backbone.layer2.0.bn1.bias, %model.backbone.layer2.0.bn1.running_mean, %model.backbone.layer2.0.bn1.running_var) /* ty=(Tensor[(1, 128, 56, 56), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%49 = %48.0;
%50 = nn.relu(%49) /* ty=Tensor[(1, 128, 56, 56), float32] */;
%51 = nn.conv2d(%50, %model.backbone.layer2.0.conv2.weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%52 = nn.batch_norm(%51, %model.backbone.layer2.0.bn2.weight, %model.backbone.layer2.0.bn2.bias, %model.backbone.layer2.0.bn2.running_mean, %model.backbone.layer2.0.bn2.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%53 = %52.0;
%54 = nn.relu(%53) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%55 = nn.conv2d(%54, %model.backbone.layer2.0.conv3.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%56 = nn.batch_norm(%55, %model.backbone.layer2.0.bn3.weight, %model.backbone.layer2.0.bn3.bias, %model.backbone.layer2.0.bn3.running_mean, %model.backbone.layer2.0.bn3.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%57 = %56.0;
%58 = nn.conv2d(%46, %model.backbone.layer2.0.downsample.0.weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%59 = nn.batch_norm(%58, %model.backbone.layer2.0.downsample.1.weight, %model.backbone.layer2.0.downsample.1.bias, %model.backbone.layer2.0.downsample.1.running_mean, %model.backbone.layer2.0.downsample.1.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%60 = %59.0;
%61 = add(%57, %60) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%62 = nn.relu(%61) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%63 = nn.conv2d(%62, %model.backbone.layer2.1.conv1.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%64 = nn.batch_norm(%63, %model.backbone.layer2.1.bn1.weight, %model.backbone.layer2.1.bn1.bias, %model.backbone.layer2.1.bn1.running_mean, %model.backbone.layer2.1.bn1.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%65 = %64.0;
%66 = nn.relu(%65) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%67 = nn.conv2d(%66, %model.backbone.layer2.1.conv2.weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%68 = nn.batch_norm(%67, %model.backbone.layer2.1.bn2.weight, %model.backbone.layer2.1.bn2.bias, %model.backbone.layer2.1.bn2.running_mean, %model.backbone.layer2.1.bn2.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%69 = %68.0;
%70 = nn.relu(%69) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%71 = nn.conv2d(%70, %model.backbone.layer2.1.conv3.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%72 = nn.batch_norm(%71, %model.backbone.layer2.1.bn3.weight, %model.backbone.layer2.1.bn3.bias, %model.backbone.layer2.1.bn3.running_mean, %model.backbone.layer2.1.bn3.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%73 = %72.0;
%74 = add(%73, %62) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%75 = nn.relu(%74) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%76 = nn.conv2d(%75, %model.backbone.layer2.2.conv1.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%77 = nn.batch_norm(%76, %model.backbone.layer2.2.bn1.weight, %model.backbone.layer2.2.bn1.bias, %model.backbone.layer2.2.bn1.running_mean, %model.backbone.layer2.2.bn1.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%78 = %77.0;
%79 = nn.relu(%78) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%80 = nn.conv2d(%79, %model.backbone.layer2.2.conv2.weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%81 = nn.batch_norm(%80, %model.backbone.layer2.2.bn2.weight, %model.backbone.layer2.2.bn2.bias, %model.backbone.layer2.2.bn2.running_mean, %model.backbone.layer2.2.bn2.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%82 = %81.0;
%83 = nn.relu(%82) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%84 = nn.conv2d(%83, %model.backbone.layer2.2.conv3.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%85 = nn.batch_norm(%84, %model.backbone.layer2.2.bn3.weight, %model.backbone.layer2.2.bn3.bias, %model.backbone.layer2.2.bn3.running_mean, %model.backbone.layer2.2.bn3.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%86 = %85.0;
%87 = add(%86, %75) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%88 = nn.relu(%87) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%89 = nn.conv2d(%88, %model.backbone.layer2.3.conv1.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%90 = nn.batch_norm(%89, %model.backbone.layer2.3.bn1.weight, %model.backbone.layer2.3.bn1.bias, %model.backbone.layer2.3.bn1.running_mean, %model.backbone.layer2.3.bn1.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%91 = %90.0;
%92 = nn.relu(%91) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%93 = nn.conv2d(%92, %model.backbone.layer2.3.conv2.weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%94 = nn.batch_norm(%93, %model.backbone.layer2.3.bn2.weight, %model.backbone.layer2.3.bn2.bias, %model.backbone.layer2.3.bn2.running_mean, %model.backbone.layer2.3.bn2.running_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
%95 = %94.0;
%96 = nn.relu(%95) /* ty=Tensor[(1, 128, 28, 28), float32] */;
%97 = nn.conv2d(%96, %model.backbone.layer2.3.conv3.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%98 = nn.batch_norm(%97, %model.backbone.layer2.3.bn3.weight, %model.backbone.layer2.3.bn3.bias, %model.backbone.layer2.3.bn3.running_mean, %model.backbone.layer2.3.bn3.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%99 = %98.0;
%100 = add(%99, %88) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%101 = nn.relu(%100) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%102 = nn.conv2d(%101, %model.backbone.layer3.0.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%103 = nn.batch_norm(%102, %model.backbone.layer3.0.bn1.weight, %model.backbone.layer3.0.bn1.bias, %model.backbone.layer3.0.bn1.running_mean, %model.backbone.layer3.0.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%104 = %103.0;
%105 = nn.relu(%104) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%106 = nn.conv2d(%105, %model.backbone.layer3.0.conv2.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%107 = nn.batch_norm(%106, %model.backbone.layer3.0.bn2.weight, %model.backbone.layer3.0.bn2.bias, %model.backbone.layer3.0.bn2.running_mean, %model.backbone.layer3.0.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%108 = %107.0;
%109 = nn.relu(%108) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%110 = nn.conv2d(%109, %model.backbone.layer3.0.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%111 = nn.batch_norm(%110, %model.backbone.layer3.0.bn3.weight, %model.backbone.layer3.0.bn3.bias, %model.backbone.layer3.0.bn3.running_mean, %model.backbone.layer3.0.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%112 = %111.0;
%113 = nn.conv2d(%101, %model.backbone.layer3.0.downsample.0.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%114 = nn.batch_norm(%113, %model.backbone.layer3.0.downsample.1.weight, %model.backbone.layer3.0.downsample.1.bias, %model.backbone.layer3.0.downsample.1.running_mean, %model.backbone.layer3.0.downsample.1.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%115 = %114.0;
%116 = add(%112, %115) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%117 = nn.relu(%116) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%118 = nn.conv2d(%117, %model.backbone.layer3.1.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%119 = nn.batch_norm(%118, %model.backbone.layer3.1.bn1.weight, %model.backbone.layer3.1.bn1.bias, %model.backbone.layer3.1.bn1.running_mean, %model.backbone.layer3.1.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%120 = %119.0;
%121 = nn.relu(%120) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%122 = nn.conv2d(%121, %model.backbone.layer3.1.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%123 = nn.batch_norm(%122, %model.backbone.layer3.1.bn2.weight, %model.backbone.layer3.1.bn2.bias, %model.backbone.layer3.1.bn2.running_mean, %model.backbone.layer3.1.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%124 = %123.0;
%125 = nn.relu(%124) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%126 = nn.conv2d(%125, %model.backbone.layer3.1.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%127 = nn.batch_norm(%126, %model.backbone.layer3.1.bn3.weight, %model.backbone.layer3.1.bn3.bias, %model.backbone.layer3.1.bn3.running_mean, %model.backbone.layer3.1.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%128 = %127.0;
%129 = add(%128, %117) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%130 = nn.relu(%129) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%131 = nn.conv2d(%130, %model.backbone.layer3.2.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%132 = nn.batch_norm(%131, %model.backbone.layer3.2.bn1.weight, %model.backbone.layer3.2.bn1.bias, %model.backbone.layer3.2.bn1.running_mean, %model.backbone.layer3.2.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%133 = %132.0;
%134 = nn.relu(%133) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%135 = nn.conv2d(%134, %model.backbone.layer3.2.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%136 = nn.batch_norm(%135, %model.backbone.layer3.2.bn2.weight, %model.backbone.layer3.2.bn2.bias, %model.backbone.layer3.2.bn2.running_mean, %model.backbone.layer3.2.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%137 = %136.0;
%138 = nn.relu(%137) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%139 = nn.conv2d(%138, %model.backbone.layer3.2.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%140 = nn.batch_norm(%139, %model.backbone.layer3.2.bn3.weight, %model.backbone.layer3.2.bn3.bias, %model.backbone.layer3.2.bn3.running_mean, %model.backbone.layer3.2.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%141 = %140.0;
%142 = add(%141, %130) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%143 = nn.relu(%142) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%144 = nn.conv2d(%143, %model.backbone.layer3.3.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%145 = nn.batch_norm(%144, %model.backbone.layer3.3.bn1.weight, %model.backbone.layer3.3.bn1.bias, %model.backbone.layer3.3.bn1.running_mean, %model.backbone.layer3.3.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%146 = %145.0;
%147 = nn.relu(%146) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%148 = nn.conv2d(%147, %model.backbone.layer3.3.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%149 = nn.batch_norm(%148, %model.backbone.layer3.3.bn2.weight, %model.backbone.layer3.3.bn2.bias, %model.backbone.layer3.3.bn2.running_mean, %model.backbone.layer3.3.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%150 = %149.0;
%151 = nn.relu(%150) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%152 = nn.conv2d(%151, %model.backbone.layer3.3.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%153 = nn.batch_norm(%152, %model.backbone.layer3.3.bn3.weight, %model.backbone.layer3.3.bn3.bias, %model.backbone.layer3.3.bn3.running_mean, %model.backbone.layer3.3.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%154 = %153.0;
%155 = add(%154, %143) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%156 = nn.relu(%155) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%157 = nn.conv2d(%156, %model.backbone.layer3.4.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%158 = nn.batch_norm(%157, %model.backbone.layer3.4.bn1.weight, %model.backbone.layer3.4.bn1.bias, %model.backbone.layer3.4.bn1.running_mean, %model.backbone.layer3.4.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%159 = %158.0;
%160 = nn.relu(%159) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%161 = nn.conv2d(%160, %model.backbone.layer3.4.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%162 = nn.batch_norm(%161, %model.backbone.layer3.4.bn2.weight, %model.backbone.layer3.4.bn2.bias, %model.backbone.layer3.4.bn2.running_mean, %model.backbone.layer3.4.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%163 = %162.0;
%164 = nn.relu(%163) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%165 = nn.conv2d(%164, %model.backbone.layer3.4.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%166 = nn.batch_norm(%165, %model.backbone.layer3.4.bn3.weight, %model.backbone.layer3.4.bn3.bias, %model.backbone.layer3.4.bn3.running_mean, %model.backbone.layer3.4.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%167 = %166.0;
%168 = add(%167, %156) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%169 = nn.relu(%168) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%170 = nn.conv2d(%169, %model.backbone.layer3.5.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%171 = nn.batch_norm(%170, %model.backbone.layer3.5.bn1.weight, %model.backbone.layer3.5.bn1.bias, %model.backbone.layer3.5.bn1.running_mean, %model.backbone.layer3.5.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%172 = %171.0;
%173 = nn.relu(%172) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%174 = nn.conv2d(%173, %model.backbone.layer3.5.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%175 = nn.batch_norm(%174, %model.backbone.layer3.5.bn2.weight, %model.backbone.layer3.5.bn2.bias, %model.backbone.layer3.5.bn2.running_mean, %model.backbone.layer3.5.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%176 = %175.0;
%177 = nn.relu(%176) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%178 = nn.conv2d(%177, %model.backbone.layer3.5.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%179 = nn.batch_norm(%178, %model.backbone.layer3.5.bn3.weight, %model.backbone.layer3.5.bn3.bias, %model.backbone.layer3.5.bn3.running_mean, %model.backbone.layer3.5.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%180 = %179.0;
%181 = add(%180, %169) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%182 = nn.relu(%181) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%183 = nn.conv2d(%182, %model.backbone.layer3.6.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%184 = nn.batch_norm(%183, %model.backbone.layer3.6.bn1.weight, %model.backbone.layer3.6.bn1.bias, %model.backbone.layer3.6.bn1.running_mean, %model.backbone.layer3.6.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%185 = %184.0;
%186 = nn.relu(%185) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%187 = nn.conv2d(%186, %model.backbone.layer3.6.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%188 = nn.batch_norm(%187, %model.backbone.layer3.6.bn2.weight, %model.backbone.layer3.6.bn2.bias, %model.backbone.layer3.6.bn2.running_mean, %model.backbone.layer3.6.bn2.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%189 = %188.0;
%190 = nn.relu(%189) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%191 = nn.conv2d(%190, %model.backbone.layer3.6.conv3.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%192 = nn.batch_norm(%191, %model.backbone.layer3.6.bn3.weight, %model.backbone.layer3.6.bn3.bias, %model.backbone.layer3.6.bn3.running_mean, %model.backbone.layer3.6.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%193 = %192.0;
%194 = add(%193, %182) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%195 = nn.relu(%194) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%196 = nn.conv2d(%195, %model.backbone.layer3.7.conv1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%197 = nn.batch_norm(%196, %model.backbone.layer3.7.bn1.weight, %model.backbone.layer3.7.bn1.bias, %model.backbone.layer3.7.bn1.running_mean, %model.backbone.layer3.7.bn1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%198 = %197.0;
%199 = nn.relu(%198) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%200 = nn.conv2d(%199, %model.backbone.layer3.7.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%400 = nn.batch_norm(%399, %model.backbone.layer3.22.bn3.weight, %model.backbone.layer3.22.bn3.bias, %model.backbone.layer3.22.bn3.running_mean, %model.backbone.layer3.22.bn3.running_var) /* ty=(Tensor[(1, 1024, 28, 28), float32], Tensor[(1024), float32], Tensor[(1024), float32]) */;
%401 = %400.0;
%402 = add(%401, %390) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%403 = nn.relu(%402) /* ty=Tensor[(1, 1024, 28, 28), float32] */;
%404 = nn.conv2d(%403, %model.backbone.layer4.0.conv1.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%405 = nn.batch_norm(%404, %model.backbone.layer4.0.bn1.weight, %model.backbone.layer4.0.bn1.bias, %model.backbone.layer4.0.bn1.running_mean, %model.backbone.layer4.0.bn1.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%406 = %405.0;
%407 = nn.relu(%406) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%408 = nn.conv2d(%407, %model.backbone.layer4.0.conv2.weight, padding=[2, 2, 2, 2], dilation=[2, 2], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%409 = nn.batch_norm(%408, %model.backbone.layer4.0.bn2.weight, %model.backbone.layer4.0.bn2.bias, %model.backbone.layer4.0.bn2.running_mean, %model.backbone.layer4.0.bn2.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%410 = %409.0;
%411 = nn.relu(%410) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%412 = nn.conv2d(%411, %model.backbone.layer4.0.conv3.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%413 = nn.batch_norm(%412, %model.backbone.layer4.0.bn3.weight, %model.backbone.layer4.0.bn3.bias, %model.backbone.layer4.0.bn3.running_mean, %model.backbone.layer4.0.bn3.running_var) /* ty=(Tensor[(1, 2048, 28, 28), float32], Tensor[(2048), float32], Tensor[(2048), float32]) */;
%414 = %413.0;
%415 = nn.conv2d(%403, %model.backbone.layer4.0.downsample.0.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%416 = nn.batch_norm(%415, %model.backbone.layer4.0.downsample.1.weight, %model.backbone.layer4.0.downsample.1.bias, %model.backbone.layer4.0.downsample.1.running_mean, %model.backbone.layer4.0.downsample.1.running_var) /* ty=(Tensor[(1, 2048, 28, 28), float32], Tensor[(2048), float32], Tensor[(2048), float32]) */;
%417 = %416.0;
%418 = add(%414, %417) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%419 = nn.relu(%418) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%420 = nn.conv2d(%419, %model.backbone.layer4.1.conv1.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%421 = nn.batch_norm(%420, %model.backbone.layer4.1.bn1.weight, %model.backbone.layer4.1.bn1.bias, %model.backbone.layer4.1.bn1.running_mean, %model.backbone.layer4.1.bn1.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%422 = %421.0;
%423 = nn.relu(%422) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%424 = nn.conv2d(%423, %model.backbone.layer4.1.conv2.weight, padding=[4, 4, 4, 4], dilation=[4, 4], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%425 = nn.batch_norm(%424, %model.backbone.layer4.1.bn2.weight, %model.backbone.layer4.1.bn2.bias, %model.backbone.layer4.1.bn2.running_mean, %model.backbone.layer4.1.bn2.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%426 = %425.0;
%427 = nn.relu(%426) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%428 = nn.conv2d(%427, %model.backbone.layer4.1.conv3.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%429 = nn.batch_norm(%428, %model.backbone.layer4.1.bn3.weight, %model.backbone.layer4.1.bn3.bias, %model.backbone.layer4.1.bn3.running_mean, %model.backbone.layer4.1.bn3.running_var) /* ty=(Tensor[(1, 2048, 28, 28), float32], Tensor[(2048), float32], Tensor[(2048), float32]) */;
%430 = %429.0;
%431 = add(%430, %419) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%432 = nn.relu(%431) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%433 = nn.conv2d(%432, %model.backbone.layer4.2.conv1.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%434 = nn.batch_norm(%433, %model.backbone.layer4.2.bn1.weight, %model.backbone.layer4.2.bn1.bias, %model.backbone.layer4.2.bn1.running_mean, %model.backbone.layer4.2.bn1.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%435 = %434.0;
%436 = nn.relu(%435) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%437 = nn.conv2d(%436, %model.backbone.layer4.2.conv2.weight, padding=[4, 4, 4, 4], dilation=[4, 4], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%438 = nn.batch_norm(%437, %model.backbone.layer4.2.bn2.weight, %model.backbone.layer4.2.bn2.bias, %model.backbone.layer4.2.bn2.running_mean, %model.backbone.layer4.2.bn2.running_var) /* ty=(Tensor[(1, 512, 28, 28), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
%439 = %438.0;
%440 = nn.relu(%439) /* ty=Tensor[(1, 512, 28, 28), float32] */;
%441 = nn.conv2d(%440, %model.backbone.layer4.2.conv3.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%442 = nn.batch_norm(%441, %model.backbone.layer4.2.bn3.weight, %model.backbone.layer4.2.bn3.bias, %model.backbone.layer4.2.bn3.running_mean, %model.backbone.layer4.2.bn3.running_var) /* ty=(Tensor[(1, 2048, 28, 28), float32], Tensor[(2048), float32], Tensor[(2048), float32]) */;
%443 = %442.0;
%444 = add(%443, %432) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%445 = nn.relu(%444) /* ty=Tensor[(1, 2048, 28, 28), float32] */;
%446 = (%445, %403);
%447 = %446.1;
%448 = nn.conv2d(%447, %model.aux_classifier.0.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%449 = nn.batch_norm(%448, %model.aux_classifier.1.weight, %model.aux_classifier.1.bias, %model.aux_classifier.1.running_mean, %model.aux_classifier.1.running_var) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
%450 = %449.0;
%451 = nn.relu(%450) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%452 = nn.dropout(%451, rate=0.1f) /* ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(1, 256, 28, 28), float32]) */;
%453 = %452.0;
%454 = nn.conv2d(%453, %model.aux_classifier.4.weight, padding=[0, 0, 0, 0], channels=21, kernel_size=[1, 1]) /* ty=Tensor[(1, 21, 28, 28), float32] */;
%455 = nn.bias_add(%454, %model.aux_classifier.4.bias) /* ty=Tensor[(1, 21, 28, 28), float32] */;
%456 = image.resize(%455, size=[224, 224]) /* ty=Tensor[(1, 21, 224, 224), float32] */;
%457 = %446.0;
%458 = nn.conv2d(%457, %model.classifier.0.convs.0.0.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%459 = nn.batch_norm(%458, %model.classifier.0.convs.0.1.weight, %model.classifier.0.convs.0.1.bias, %model.classifier.0.convs.0.1.running_mean, %model.classifier.0.convs.0.1.running_var) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%460 = %459.0;
%461 = nn.relu(%460) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%462 = nn.conv2d(%457, %model.classifier.0.convs.1.0.weight, padding=[12, 12, 12, 12], dilation=[12, 12], channels=256, kernel_size=[3, 3]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%463 = nn.batch_norm(%462, %model.classifier.0.convs.1.1.weight, %model.classifier.0.convs.1.1.bias, %model.classifier.0.convs.1.1.running_mean, %model.classifier.0.convs.1.1.running_var) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%464 = %463.0;
%465 = nn.relu(%464) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%466 = nn.conv2d(%457, %model.classifier.0.convs.2.0.weight, padding=[24, 24, 24, 24], dilation=[24, 24], channels=256, kernel_size=[3, 3]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%467 = nn.batch_norm(%466, %model.classifier.0.convs.2.1.weight, %model.classifier.0.convs.2.1.bias, %model.classifier.0.convs.2.1.running_mean, %model.classifier.0.convs.2.1.running_var) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%468 = %467.0;
%469 = nn.relu(%468) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%470 = nn.conv2d(%457, %model.classifier.0.convs.3.0.weight, padding=[36, 36, 36, 36], dilation=[36, 36], channels=256, kernel_size=[3, 3]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%471 = nn.batch_norm(%470, %model.classifier.0.convs.3.1.weight, %model.classifier.0.convs.3.1.bias, %model.classifier.0.convs.3.1.running_mean, %model.classifier.0.convs.3.1.running_var) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%472 = %471.0;
%473 = nn.relu(%472) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%474 = nn.adaptive_avg_pool2d(%457, output_size=[1, 1]) /</em> ty=Tensor[(1, 2048, 1, 1), float32] <em>/;
%475 = nn.conv2d(%474, %model.classifier.0.convs.4.1.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /</em> ty=Tensor[(1, 256, 1, 1), float32] <em>/;
%476 = nn.batch_norm(%475, %model.classifier.0.convs.4.2.weight, %model.classifier.0.convs.4.2.bias, %model.classifier.0.convs.4.2.running_mean, %model.classifier.0.convs.4.2.running_var) /</em> ty=(Tensor[(1, 256, 1, 1), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%477 = %476.0;
%478 = nn.relu(%477) /</em> ty=Tensor[(1, 256, 1, 1), float32] <em>/;
%479 = image.resize(%478, size=[28, 28]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%480 = (%461, %465, %469, %473, %479);
%481 = concatenate(%480, axis=1) /</em> ty=Tensor[(1, 1280, 28, 28), float32] <em>/;
%482 = nn.conv2d(%481, %model.classifier.0.project.0.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%483 = nn.batch_norm(%482, %model.classifier.0.project.1.weight, %model.classifier.0.project.1.bias, %model.classifier.0.project.1.running_mean, %model.classifier.0.project.1.running_var) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%484 = %483.0;
%485 = nn.relu(%484) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%486 = nn.dropout(%485) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(1, 256, 28, 28), float32]) <em>/;
%487 = %486.0;
%488 = nn.conv2d(%487, %model.classifier.1.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%489 = nn.batch_norm(%488, %model.classifier.2.weight, %model.classifier.2.bias, %model.classifier.2.running_mean, %model.classifier.2.running_var) /</em> ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) <em>/;
%490 = %489.0;
%491 = nn.relu(%490) /</em> ty=Tensor[(1, 256, 28, 28), float32] <em>/;
%492 = nn.conv2d(%491, %model.classifier.4.weight, padding=[0, 0, 0, 0], channels=21, kernel_size=[1, 1]) /</em> ty=Tensor[(1, 21, 28, 28), float32] <em>/;
%493 = nn.bias_add(%492, %model.classifier.4.bias) /</em> ty=Tensor[(1, 21, 28, 28), float32] <em>/;
%494 = image.resize(%493, size=[224, 224]) /</em> ty=Tensor[(1, 21, 224, 224), float32] */;
%495 = (%456, %494);
%496 = %495.0;
%497 = %495.1;
(%496, %497)
}

@abdulazizm

I have solved the problem that appeared a few days ago! However, the following error occurred while building the deployment library for the edge device:

********************* Quantization Summary *********************

INFO: Output: quantize_eval_model: /tmp/tmpzjgk9dog/quantize_eval_model.pb deploy_model: /tmp/tmpzjgk9dog/deploy_model.pb PYXIR[INFO]: Not switching to specified runtime: vai after on-the-fly quantization as the model is compiled for a different target device.

Traceback (most recent call last): File “yolov3_tvm_host.py”, line 458, in lib_edge_dpu = relay.build(mod, target=tvm_target, params=params) File “/workspace/tvm/python/tvm/relay/build_module.py”, line 283, in build graph_json, runtime_mod, params = bld_mod.build(ir_mod, target, target_host, params) File “/workspace/tvm/python/tvm/relay/build_module.py”, line 132, in build self._build(mod, target, target_host) File “tvm/_ffi/_cython/./packed_func.pxi”, line 322, in tvm._ffi._cy3.core.PackedFuncBase.call File “tvm/_ffi/_cython/./packed_func.pxi”, line 257, in tvm._ffi._cy3.core.FuncCall File “tvm/_ffi/_cython/./packed_func.pxi”, line 246, in tvm._ffi._cy3.core.FuncCall3 File “tvm/_ffi/_cython/./base.pxi”, line 160, in tvm._ffi._cy3.core.CALL tvm._ffi.base.TVMError: Traceback (most recent call last): 7: TVMFuncCall 6: _ZNSt17_Function_handlerIFvN 5: tvm::relay::backend::RelayBuildModule::GetFunction(std::__cxx11::basic_string<char, std::char_traits, std::allocator > const&, tvm::runtime::ObjectPtrtvm::runtime::Object const&)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#3}::operator()(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const 4: tvm::relay::backend::RelayBuildModule::BuildRelay(tvm::IRModule, std::unordered_map<std::__cxx11::basic_string<char, std::char_traits, std::allocator >, tvm::runtime::NDArray, std::hash<std::__cxx11::basic_string<char, std::char_traits, std::allocator > >, std::equal_to<std::__cxx11::basic_string<char, std::char_traits, std::allocator > >, std::allocator<std::pair<std::__cxx11::basic_string<char, std::char_traits, std::allocator > const, tvm::runtime::NDArray> > > const&) 3: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), tvm::relay::backend::GraphRuntimeCodegenModule::GetFunction(std::__cxx11::basic_string<char, std::char_traits, std::allocator > const&, tvm::runtime::ObjectPtrtvm::runtime::Object const&)::{lambda(tvm::runtime::TVMArgs, 
tvm::runtime::TVMRetValue*)#2}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&) 2: tvm::relay::backend::GraphRuntimeCodegen::Codegen(tvm::relay::Function) 1: tvm::relay::CompileEngineImpl::LowerExternalFunctions() 0: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), TVMFuncCreateFromCFunc::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#2}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&) File “tvm/_ffi/_cython/./packed_func.pxi”, line 56, in tvm._ffi._cy3.core.tvm_callback File “/workspace/tvm/python/tvm/contrib/target/vitis_ai.py”, line 156, in vitis_ai_compiler return fcreate(name, load_runtime_module, export_runtime_module) File “tvm/_ffi/_cython/./packed_func.pxi”, line 322, in tvm._ffi._cy3.core.PackedFuncBase.call File “tvm/_ffi/_cython/./packed_func.pxi”, line 257, in tvm._ffi._cy3.core.FuncCall File “tvm/_ffi/_cython/./packed_func.pxi”, line 246, in tvm._ffi._cy3.core.FuncCall3 File “tvm/_ffi/_cython/./base.pxi”, line 160, in tvm._ffi._cy3.core.CALL TVMError: Reading string from istringstream failed (vitis-ai-tensorflow) Vitis-AI /workspace/yolov3 >

@williamyang4978 How did you solve the issue? Are you able to make it onto the board and inference successfully?

@abdulazizm I can run yolov3 on TVM.

1 Like