@srkreddy1238
Error:
Traceback (most recent call last):
File “test_deploy_v2.py”, line 155, in
ftime = evaluate_network(network, target, target_host, “float32”, args.repeat)
File “test_deploy_v2.py”, line 90, in evaluate_network
module = runtime.GraphModule(rlib"default")
File “/home/test/tvm_clml/tvm/python/tvm/_ffi/_ctypes/packed_func.py”, line 238, in call
raise get_last_ffi_error()
tvm.error.RPCError: Traceback (most recent call last):
4: TVMFuncCall
3: tvm::runtime::RPCWrappedFunc::operator()(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*) const
2: tvm::runtime::RPCClientSession::CallFunc(void*, TVMValue const*, int const*, int, std::function<void (tvm::runtime::TVMArgs)> const&)
1: tvm::runtime::RPCEndpoint::CallFunc(void*, TVMValue const*, int const*, int, std::function<void (tvm::runtime::TVMArgs)>)
0: tvm::runtime::RPCEndpoint::HandleUntilReturnEvent(bool, std::function<void (tvm::runtime::TVMArgs)>)
File “/home/test/tvm_clml/tvm/src/runtime/rpc/rpc_endpoint.cc”, line 380
RPCError: Error caught from RPC call:
[12:57:26] /home/test/tvm_clml/tvm/src/runtime/contrib/clml/clml_runtime.cc:1299: InternalError: Check failed: (op && result == CL_SUCCESS) is false: Dense Error:-30
model (the `/* ty=... */` comments below were garbled by markdown; restored):
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] */) -> Tensor[(1, 1000), float32] {
  %0 = @tvmgen_default_clml_main_0(%data) /* ty=Tensor[(1, 1000), float32] */;
  %1 = nn.bias_add(%0, meta[relay.Constant][0] /* ty=Tensor[(1000), float32] */) /* ty=Tensor[(1, 1000), float32] */;
  @tvmgen_default_clml_main_1(%1) /* ty=Tensor[(1, 1000), float32] */
}
def @ tvmgen_default_clml_main_0(%clml_0_i0: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] /, Compiler=“clml”, Primitive=1, Inline=1, global_symbol=“tvmgen_default_clml_main_0”) → Tensor[(1, 1000), float32] {
%83 = fn (%FunctionVar_26_0: Tensor[(1, 3, 224, 224), float32] / ty=Tensor[(1, 3, 224, 224), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 32, 112, 112), float32] {
%80 = nn.conv2d(%FunctionVar_26_0, meta[relay.Constant][132] / ty=Tensor[(32, 3, 3, 3), float32] /, strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]) / ty=Tensor[(1, 32, 112, 112), float32] /;
%81 = nn.batch_norm(%80, meta[relay.Constant][133] / ty=Tensor[(32), float32] /, meta[relay.Constant][134] / ty=Tensor[(32), float32] /, meta[relay.Constant][135] / ty=Tensor[(32), float32] /, meta[relay.Constant][136] / ty=Tensor[(32), float32] /) / ty=(Tensor[(1, 32, 112, 112), float32], Tensor[(32), float32], Tensor[(32), float32]) /;
%82 = %81.0 / ty=Tensor[(1, 32, 112, 112), float32] /;
nn.relu(%82) / ty=Tensor[(1, 32, 112, 112), float32] /
} / ty=fn (Tensor[(1, 3, 224, 224), float32]) → Tensor[(1, 32, 112, 112), float32] /;
%84 = %83(%clml_0_i0) / ty=Tensor[(1, 32, 112, 112), float32] /;
%85 = fn (%FunctionVar_25_0: Tensor[(1, 32, 112, 112), float32] / ty=Tensor[(1, 32, 112, 112), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 32, 112, 112), float32] {
%77 = nn.conv2d(%FunctionVar_25_0, meta[relay.Constant][127] / ty=Tensor[(1, 32, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 32, 112, 112), float32] /;
%78 = nn.batch_norm(%77, meta[relay.Constant][128] / ty=Tensor[(32), float32] /, meta[relay.Constant][129] / ty=Tensor[(32), float32] /, meta[relay.Constant][130] / ty=Tensor[(32), float32] /, meta[relay.Constant][131] / ty=Tensor[(32), float32] /) / ty=(Tensor[(1, 32, 112, 112), float32], Tensor[(32), float32], Tensor[(32), float32]) /;
%79 = %78.0 / ty=Tensor[(1, 32, 112, 112), float32] /;
nn.relu(%79) / ty=Tensor[(1, 32, 112, 112), float32] /
} / ty=fn (Tensor[(1, 32, 112, 112), float32]) → Tensor[(1, 32, 112, 112), float32] /;
%86 = %85(%84) / ty=Tensor[(1, 32, 112, 112), float32] /;
%87 = fn (%FunctionVar_24_0: Tensor[(1, 32, 112, 112), float32] / ty=Tensor[(1, 32, 112, 112), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 64, 112, 112), float32] {
%74 = nn.conv2d(%FunctionVar_24_0, meta[relay.Constant][122] / ty=Tensor[(64, 32, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) / ty=Tensor[(1, 64, 112, 112), float32] /;
%75 = nn.batch_norm(%74, meta[relay.Constant][123] / ty=Tensor[(64), float32] /, meta[relay.Constant][124] / ty=Tensor[(64), float32] /, meta[relay.Constant][125] / ty=Tensor[(64), float32] /, meta[relay.Constant][126] / ty=Tensor[(64), float32] /) / ty=(Tensor[(1, 64, 112, 112), float32], Tensor[(64), float32], Tensor[(64), float32]) /;
%76 = %75.0 / ty=Tensor[(1, 64, 112, 112), float32] /;
nn.relu(%76) / ty=Tensor[(1, 64, 112, 112), float32] /
} / ty=fn (Tensor[(1, 32, 112, 112), float32]) → Tensor[(1, 64, 112, 112), float32] /;
%88 = %87(%86) / ty=Tensor[(1, 64, 112, 112), float32] /;
%89 = fn (%FunctionVar_23_0: Tensor[(1, 64, 112, 112), float32] / ty=Tensor[(1, 64, 112, 112), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 64, 56, 56), float32] {
%71 = nn.conv2d(%FunctionVar_23_0, meta[relay.Constant][117] / ty=Tensor[(1, 64, 3, 3), float32] /, strides=[2, 2], padding=[1, 1, 1, 1], groups=64, channels=64, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 64, 56, 56), float32] /;
%72 = nn.batch_norm(%71, meta[relay.Constant][118] / ty=Tensor[(64), float32] /, meta[relay.Constant][119] / ty=Tensor[(64), float32] /, meta[relay.Constant][120] / ty=Tensor[(64), float32] /, meta[relay.Constant][121] / ty=Tensor[(64), float32] /) / ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) /;
%73 = %72.0 / ty=Tensor[(1, 64, 56, 56), float32] /;
nn.relu(%73) / ty=Tensor[(1, 64, 56, 56), float32] /
} / ty=fn (Tensor[(1, 64, 112, 112), float32]) → Tensor[(1, 64, 56, 56), float32] /;
%90 = %89(%88) / ty=Tensor[(1, 64, 56, 56), float32] /;
%91 = fn (%FunctionVar_22_0: Tensor[(1, 64, 56, 56), float32] / ty=Tensor[(1, 64, 56, 56), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 128, 56, 56), float32] {
%68 = nn.conv2d(%FunctionVar_22_0, meta[relay.Constant][112] / ty=Tensor[(128, 64, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) / ty=Tensor[(1, 128, 56, 56), float32] /;
%69 = nn.batch_norm(%68, meta[relay.Constant][113] / ty=Tensor[(128), float32] /, meta[relay.Constant][114] / ty=Tensor[(128), float32] /, meta[relay.Constant][115] / ty=Tensor[(128), float32] /, meta[relay.Constant][116] / ty=Tensor[(128), float32] /) / ty=(Tensor[(1, 128, 56, 56), float32], Tensor[(128), float32], Tensor[(128), float32]) /;
%70 = %69.0 / ty=Tensor[(1, 128, 56, 56), float32] /;
nn.relu(%70) / ty=Tensor[(1, 128, 56, 56), float32] /
} / ty=fn (Tensor[(1, 64, 56, 56), float32]) → Tensor[(1, 128, 56, 56), float32] /;
%92 = %91(%90) / ty=Tensor[(1, 128, 56, 56), float32] /;
%93 = fn (%FunctionVar_21_0: Tensor[(1, 128, 56, 56), float32] / ty=Tensor[(1, 128, 56, 56), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 128, 56, 56), float32] {
%65 = nn.conv2d(%FunctionVar_21_0, meta[relay.Constant][107] / ty=Tensor[(1, 128, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 128, 56, 56), float32] /;
%66 = nn.batch_norm(%65, meta[relay.Constant][108] / ty=Tensor[(128), float32] /, meta[relay.Constant][109] / ty=Tensor[(128), float32] /, meta[relay.Constant][110] / ty=Tensor[(128), float32] /, meta[relay.Constant][111] / ty=Tensor[(128), float32] /) / ty=(Tensor[(1, 128, 56, 56), float32], Tensor[(128), float32], Tensor[(128), float32]) /;
%67 = %66.0 / ty=Tensor[(1, 128, 56, 56), float32] /;
nn.relu(%67) / ty=Tensor[(1, 128, 56, 56), float32] /
} / ty=fn (Tensor[(1, 128, 56, 56), float32]) → Tensor[(1, 128, 56, 56), float32] /;
%94 = %93(%92) / ty=Tensor[(1, 128, 56, 56), float32] /;
%95 = fn (%FunctionVar_20_0: Tensor[(1, 128, 56, 56), float32] / ty=Tensor[(1, 128, 56, 56), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 128, 56, 56), float32] {
%62 = nn.conv2d(%FunctionVar_20_0, meta[relay.Constant][102] / ty=Tensor[(128, 128, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) / ty=Tensor[(1, 128, 56, 56), float32] /;
%63 = nn.batch_norm(%62, meta[relay.Constant][103] / ty=Tensor[(128), float32] /, meta[relay.Constant][104] / ty=Tensor[(128), float32] /, meta[relay.Constant][105] / ty=Tensor[(128), float32] /, meta[relay.Constant][106] / ty=Tensor[(128), float32] /) / ty=(Tensor[(1, 128, 56, 56), float32], Tensor[(128), float32], Tensor[(128), float32]) /;
%64 = %63.0 / ty=Tensor[(1, 128, 56, 56), float32] /;
nn.relu(%64) / ty=Tensor[(1, 128, 56, 56), float32] /
} / ty=fn (Tensor[(1, 128, 56, 56), float32]) → Tensor[(1, 128, 56, 56), float32] /;
%96 = %95(%94) / ty=Tensor[(1, 128, 56, 56), float32] /;
%97 = fn (%FunctionVar_19_0: Tensor[(1, 128, 56, 56), float32] / ty=Tensor[(1, 128, 56, 56), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 128, 28, 28), float32] {
%59 = nn.conv2d(%FunctionVar_19_0, meta[relay.Constant][97] / ty=Tensor[(1, 128, 3, 3), float32] /, strides=[2, 2], padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 128, 28, 28), float32] /;
%60 = nn.batch_norm(%59, meta[relay.Constant][98] / ty=Tensor[(128), float32] /, meta[relay.Constant][99] / ty=Tensor[(128), float32] /, meta[relay.Constant][100] / ty=Tensor[(128), float32] /, meta[relay.Constant][101] / ty=Tensor[(128), float32] /) / ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) /;
%61 = %60.0 / ty=Tensor[(1, 128, 28, 28), float32] /;
nn.relu(%61) / ty=Tensor[(1, 128, 28, 28), float32] /
} / ty=fn (Tensor[(1, 128, 56, 56), float32]) → Tensor[(1, 128, 28, 28), float32] /;
%98 = %97(%96) / ty=Tensor[(1, 128, 28, 28), float32] /;
%99 = fn (%FunctionVar_18_0: Tensor[(1, 128, 28, 28), float32] / ty=Tensor[(1, 128, 28, 28), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 256, 28, 28), float32] {
%56 = nn.conv2d(%FunctionVar_18_0, meta[relay.Constant][92] / ty=Tensor[(256, 128, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) / ty=Tensor[(1, 256, 28, 28), float32] /;
%57 = nn.batch_norm(%56, meta[relay.Constant][93] / ty=Tensor[(256), float32] /, meta[relay.Constant][94] / ty=Tensor[(256), float32] /, meta[relay.Constant][95] / ty=Tensor[(256), float32] /, meta[relay.Constant][96] / ty=Tensor[(256), float32] /) / ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) /;
%58 = %57.0 / ty=Tensor[(1, 256, 28, 28), float32] /;
nn.relu(%58) / ty=Tensor[(1, 256, 28, 28), float32] /
} / ty=fn (Tensor[(1, 128, 28, 28), float32]) → Tensor[(1, 256, 28, 28), float32] /;
%100 = %99(%98) / ty=Tensor[(1, 256, 28, 28), float32] /;
%101 = fn (%FunctionVar_17_0: Tensor[(1, 256, 28, 28), float32] / ty=Tensor[(1, 256, 28, 28), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 256, 28, 28), float32] {
%53 = nn.conv2d(%FunctionVar_17_0, meta[relay.Constant][87] / ty=Tensor[(1, 256, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=256, channels=256, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 256, 28, 28), float32] /;
%54 = nn.batch_norm(%53, meta[relay.Constant][88] / ty=Tensor[(256), float32] /, meta[relay.Constant][89] / ty=Tensor[(256), float32] /, meta[relay.Constant][90] / ty=Tensor[(256), float32] /, meta[relay.Constant][91] / ty=Tensor[(256), float32] /) / ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) /;
%55 = %54.0 / ty=Tensor[(1, 256, 28, 28), float32] /;
nn.relu(%55) / ty=Tensor[(1, 256, 28, 28), float32] /
} / ty=fn (Tensor[(1, 256, 28, 28), float32]) → Tensor[(1, 256, 28, 28), float32] /;
%102 = %101(%100) / ty=Tensor[(1, 256, 28, 28), float32] /;
%103 = fn (%FunctionVar_16_0: Tensor[(1, 256, 28, 28), float32] / ty=Tensor[(1, 256, 28, 28), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 256, 28, 28), float32] {
%50 = nn.conv2d(%FunctionVar_16_0, meta[relay.Constant][82] / ty=Tensor[(256, 256, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) / ty=Tensor[(1, 256, 28, 28), float32] /;
%51 = nn.batch_norm(%50, meta[relay.Constant][83] / ty=Tensor[(256), float32] /, meta[relay.Constant][84] / ty=Tensor[(256), float32] /, meta[relay.Constant][85] / ty=Tensor[(256), float32] /, meta[relay.Constant][86] / ty=Tensor[(256), float32] /) / ty=(Tensor[(1, 256, 28, 28), float32], Tensor[(256), float32], Tensor[(256), float32]) /;
%52 = %51.0 / ty=Tensor[(1, 256, 28, 28), float32] /;
nn.relu(%52) / ty=Tensor[(1, 256, 28, 28), float32] /
} / ty=fn (Tensor[(1, 256, 28, 28), float32]) → Tensor[(1, 256, 28, 28), float32] /;
%104 = %103(%102) / ty=Tensor[(1, 256, 28, 28), float32] /;
%105 = fn (%FunctionVar_15_0: Tensor[(1, 256, 28, 28), float32] / ty=Tensor[(1, 256, 28, 28), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 256, 14, 14), float32] {
%47 = nn.conv2d(%FunctionVar_15_0, meta[relay.Constant][77] / ty=Tensor[(1, 256, 3, 3), float32] /, strides=[2, 2], padding=[1, 1, 1, 1], groups=256, channels=256, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 256, 14, 14), float32] /;
%48 = nn.batch_norm(%47, meta[relay.Constant][78] / ty=Tensor[(256), float32] /, meta[relay.Constant][79] / ty=Tensor[(256), float32] /, meta[relay.Constant][80] / ty=Tensor[(256), float32] /, meta[relay.Constant][81] / ty=Tensor[(256), float32] /) / ty=(Tensor[(1, 256, 14, 14), float32], Tensor[(256), float32], Tensor[(256), float32]) /;
%49 = %48.0 / ty=Tensor[(1, 256, 14, 14), float32] /;
nn.relu(%49) / ty=Tensor[(1, 256, 14, 14), float32] /
} / ty=fn (Tensor[(1, 256, 28, 28), float32]) → Tensor[(1, 256, 14, 14), float32] /;
%106 = %105(%104) / ty=Tensor[(1, 256, 14, 14), float32] /;
%107 = fn (%FunctionVar_14_0: Tensor[(1, 256, 14, 14), float32] / ty=Tensor[(1, 256, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%44 = nn.conv2d(%FunctionVar_14_0, meta[relay.Constant][72] / ty=Tensor[(512, 256, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) / ty=Tensor[(1, 512, 14, 14), float32] /;
%45 = nn.batch_norm(%44, meta[relay.Constant][73] / ty=Tensor[(512), float32] /, meta[relay.Constant][74] / ty=Tensor[(512), float32] /, meta[relay.Constant][75] / ty=Tensor[(512), float32] /, meta[relay.Constant][76] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%46 = %45.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%46) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 256, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%108 = %107(%106) / ty=Tensor[(1, 512, 14, 14), float32] /;
%109 = fn (%FunctionVar_13_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%41 = nn.conv2d(%FunctionVar_13_0, meta[relay.Constant][67] / ty=Tensor[(1, 512, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 512, 14, 14), float32] /;
%42 = nn.batch_norm(%41, meta[relay.Constant][68] / ty=Tensor[(512), float32] /, meta[relay.Constant][69] / ty=Tensor[(512), float32] /, meta[relay.Constant][70] / ty=Tensor[(512), float32] /, meta[relay.Constant][71] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%43 = %42.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%43) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%110 = %109(%108) / ty=Tensor[(1, 512, 14, 14), float32] /;
%111 = fn (%FunctionVar_12_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%38 = nn.conv2d(%FunctionVar_12_0, meta[relay.Constant][62] / ty=Tensor[(512, 512, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) / ty=Tensor[(1, 512, 14, 14), float32] /;
%39 = nn.batch_norm(%38, meta[relay.Constant][63] / ty=Tensor[(512), float32] /, meta[relay.Constant][64] / ty=Tensor[(512), float32] /, meta[relay.Constant][65] / ty=Tensor[(512), float32] /, meta[relay.Constant][66] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%40 = %39.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%40) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%112 = %111(%110) / ty=Tensor[(1, 512, 14, 14), float32] /;
%113 = fn (%FunctionVar_11_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%35 = nn.conv2d(%FunctionVar_11_0, meta[relay.Constant][57] / ty=Tensor[(1, 512, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 512, 14, 14), float32] /;
%36 = nn.batch_norm(%35, meta[relay.Constant][58] / ty=Tensor[(512), float32] /, meta[relay.Constant][59] / ty=Tensor[(512), float32] /, meta[relay.Constant][60] / ty=Tensor[(512), float32] /, meta[relay.Constant][61] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%37 = %36.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%37) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%114 = %113(%112) / ty=Tensor[(1, 512, 14, 14), float32] /;
%115 = fn (%FunctionVar_10_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%32 = nn.conv2d(%FunctionVar_10_0, meta[relay.Constant][52] / ty=Tensor[(512, 512, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) / ty=Tensor[(1, 512, 14, 14), float32] /;
%33 = nn.batch_norm(%32, meta[relay.Constant][53] / ty=Tensor[(512), float32] /, meta[relay.Constant][54] / ty=Tensor[(512), float32] /, meta[relay.Constant][55] / ty=Tensor[(512), float32] /, meta[relay.Constant][56] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%34 = %33.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%34) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%116 = %115(%114) / ty=Tensor[(1, 512, 14, 14), float32] /;
%117 = fn (%FunctionVar_9_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%29 = nn.conv2d(%FunctionVar_9_0, meta[relay.Constant][47] / ty=Tensor[(1, 512, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 512, 14, 14), float32] /;
%30 = nn.batch_norm(%29, meta[relay.Constant][48] / ty=Tensor[(512), float32] /, meta[relay.Constant][49] / ty=Tensor[(512), float32] /, meta[relay.Constant][50] / ty=Tensor[(512), float32] /, meta[relay.Constant][51] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%31 = %30.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%31) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%118 = %117(%116) / ty=Tensor[(1, 512, 14, 14), float32] /;
%119 = fn (%FunctionVar_8_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%26 = nn.conv2d(%FunctionVar_8_0, meta[relay.Constant][42] / ty=Tensor[(512, 512, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) / ty=Tensor[(1, 512, 14, 14), float32] /;
%27 = nn.batch_norm(%26, meta[relay.Constant][43] / ty=Tensor[(512), float32] /, meta[relay.Constant][44] / ty=Tensor[(512), float32] /, meta[relay.Constant][45] / ty=Tensor[(512), float32] /, meta[relay.Constant][46] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%28 = %27.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%28) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%120 = %119(%118) / ty=Tensor[(1, 512, 14, 14), float32] /;
%121 = fn (%FunctionVar_7_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%23 = nn.conv2d(%FunctionVar_7_0, meta[relay.Constant][37] / ty=Tensor[(1, 512, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 512, 14, 14), float32] /;
%24 = nn.batch_norm(%23, meta[relay.Constant][38] / ty=Tensor[(512), float32] /, meta[relay.Constant][39] / ty=Tensor[(512), float32] /, meta[relay.Constant][40] / ty=Tensor[(512), float32] /, meta[relay.Constant][41] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%25 = %24.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%25) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%122 = %121(%120) / ty=Tensor[(1, 512, 14, 14), float32] /;
%123 = fn (%FunctionVar_6_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%20 = nn.conv2d(%FunctionVar_6_0, meta[relay.Constant][32] / ty=Tensor[(512, 512, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) / ty=Tensor[(1, 512, 14, 14), float32] /;
%21 = nn.batch_norm(%20, meta[relay.Constant][33] / ty=Tensor[(512), float32] /, meta[relay.Constant][34] / ty=Tensor[(512), float32] /, meta[relay.Constant][35] / ty=Tensor[(512), float32] /, meta[relay.Constant][36] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%22 = %21.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%22) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%124 = %123(%122) / ty=Tensor[(1, 512, 14, 14), float32] /;
%125 = fn (%FunctionVar_5_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%17 = nn.conv2d(%FunctionVar_5_0, meta[relay.Constant][27] / ty=Tensor[(1, 512, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 512, 14, 14), float32] /;
%18 = nn.batch_norm(%17, meta[relay.Constant][28] / ty=Tensor[(512), float32] /, meta[relay.Constant][29] / ty=Tensor[(512), float32] /, meta[relay.Constant][30] / ty=Tensor[(512), float32] /, meta[relay.Constant][31] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%19 = %18.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%19) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%126 = %125(%124) / ty=Tensor[(1, 512, 14, 14), float32] /;
%127 = fn (%FunctionVar_4_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 14, 14), float32] {
%14 = nn.conv2d(%FunctionVar_4_0, meta[relay.Constant][22] / ty=Tensor[(512, 512, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) / ty=Tensor[(1, 512, 14, 14), float32] /;
%15 = nn.batch_norm(%14, meta[relay.Constant][23] / ty=Tensor[(512), float32] /, meta[relay.Constant][24] / ty=Tensor[(512), float32] /, meta[relay.Constant][25] / ty=Tensor[(512), float32] /, meta[relay.Constant][26] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 14, 14), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%16 = %15.0 / ty=Tensor[(1, 512, 14, 14), float32] /;
nn.relu(%16) / ty=Tensor[(1, 512, 14, 14), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 14, 14), float32] /;
%128 = %127(%126) / ty=Tensor[(1, 512, 14, 14), float32] /;
%129 = fn (%FunctionVar_3_0: Tensor[(1, 512, 14, 14), float32] / ty=Tensor[(1, 512, 14, 14), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 512, 7, 7), float32] {
%11 = nn.conv2d(%FunctionVar_3_0, meta[relay.Constant][17] / ty=Tensor[(1, 512, 3, 3), float32] /, strides=[2, 2], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 512, 7, 7), float32] /;
%12 = nn.batch_norm(%11, meta[relay.Constant][18] / ty=Tensor[(512), float32] /, meta[relay.Constant][19] / ty=Tensor[(512), float32] /, meta[relay.Constant][20] / ty=Tensor[(512), float32] /, meta[relay.Constant][21] / ty=Tensor[(512), float32] /) / ty=(Tensor[(1, 512, 7, 7), float32], Tensor[(512), float32], Tensor[(512), float32]) /;
%13 = %12.0 / ty=Tensor[(1, 512, 7, 7), float32] /;
nn.relu(%13) / ty=Tensor[(1, 512, 7, 7), float32] /
} / ty=fn (Tensor[(1, 512, 14, 14), float32]) → Tensor[(1, 512, 7, 7), float32] /;
%130 = %129(%128) / ty=Tensor[(1, 512, 7, 7), float32] /;
%131 = fn (%FunctionVar_2_0: Tensor[(1, 512, 7, 7), float32] / ty=Tensor[(1, 512, 7, 7), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 1024, 7, 7), float32] {
%8 = nn.conv2d(%FunctionVar_2_0, meta[relay.Constant][12] / ty=Tensor[(1024, 512, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) / ty=Tensor[(1, 1024, 7, 7), float32] /;
%9 = nn.batch_norm(%8, meta[relay.Constant][13] / ty=Tensor[(1024), float32] /, meta[relay.Constant][14] / ty=Tensor[(1024), float32] /, meta[relay.Constant][15] / ty=Tensor[(1024), float32] /, meta[relay.Constant][16] / ty=Tensor[(1024), float32] /) / ty=(Tensor[(1, 1024, 7, 7), float32], Tensor[(1024), float32], Tensor[(1024), float32]) /;
%10 = %9.0 / ty=Tensor[(1, 1024, 7, 7), float32] /;
nn.relu(%10) / ty=Tensor[(1, 1024, 7, 7), float32] /
} / ty=fn (Tensor[(1, 512, 7, 7), float32]) → Tensor[(1, 1024, 7, 7), float32] /;
%132 = %131(%130) / ty=Tensor[(1, 1024, 7, 7), float32] /;
%133 = fn (%FunctionVar_1_0: Tensor[(1, 1024, 7, 7), float32] / ty=Tensor[(1, 1024, 7, 7), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 1024, 7, 7), float32] {
%5 = nn.conv2d(%FunctionVar_1_0, meta[relay.Constant][7] / ty=Tensor[(1, 1024, 3, 3), float32] /, padding=[1, 1, 1, 1], groups=1024, channels=1024, kernel_size=[3, 3], kernel_layout=“IOHW”) / ty=Tensor[(1, 1024, 7, 7), float32] /;
%6 = nn.batch_norm(%5, meta[relay.Constant][8] / ty=Tensor[(1024), float32] /, meta[relay.Constant][9] / ty=Tensor[(1024), float32] /, meta[relay.Constant][10] / ty=Tensor[(1024), float32] /, meta[relay.Constant][11] / ty=Tensor[(1024), float32] /) / ty=(Tensor[(1, 1024, 7, 7), float32], Tensor[(1024), float32], Tensor[(1024), float32]) /;
%7 = %6.0 / ty=Tensor[(1, 1024, 7, 7), float32] /;
nn.relu(%7) / ty=Tensor[(1, 1024, 7, 7), float32] /
} / ty=fn (Tensor[(1, 1024, 7, 7), float32]) → Tensor[(1, 1024, 7, 7), float32] /;
%134 = %133(%132) / ty=Tensor[(1, 1024, 7, 7), float32] /;
%135 = fn (%FunctionVar_0_03: Tensor[(1, 1024, 7, 7), float32] / ty=Tensor[(1, 1024, 7, 7), float32] /, PartitionedFromPattern=“nn.conv2d_nn.batch_norm_TupleGetItem0_nn.relu_”, Composite=“clml.conv2d”) → Tensor[(1, 1024, 7, 7), float32] {
%2 = nn.conv2d(%FunctionVar_0_03, meta[relay.Constant][2] / ty=Tensor[(1024, 1024, 1, 1), float32] /, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) / ty=Tensor[(1, 1024, 7, 7), float32] /;
%3 = nn.batch_norm(%2, meta[relay.Constant][3] / ty=Tensor[(1024), float32] /, meta[relay.Constant][4] / ty=Tensor[(1024), float32] /, meta[relay.Constant][5] / ty=Tensor[(1024), float32] /, meta[relay.Constant][6] / ty=Tensor[(1024), float32] /) / ty=(Tensor[(1, 1024, 7, 7), float32], Tensor[(1024), float32], Tensor[(1024), float32]) /;
%4 = %3.0 / ty=Tensor[(1, 1024, 7, 7), float32] /;
nn.relu(%4) / ty=Tensor[(1, 1024, 7, 7), float32] /
} / ty=fn (Tensor[(1, 1024, 7, 7), float32]) → Tensor[(1, 1024, 7, 7), float32] /;
%136 = %135(%134) / ty=Tensor[(1, 1024, 7, 7), float32] /;
%137 = fn (%FunctionVar_0_02: Tensor[(1, 1024, 7, 7), float32] / ty=Tensor[(1, 1024, 7, 7), float32] /, PartitionedFromPattern=“nn.global_avg_pool2d_”, Composite=“clml.global_avg_pool2d”) → Tensor[(1, 1024, 1, 1), float32] {
nn.global_avg_pool2d(%FunctionVar_0_02) / ty=Tensor[(1, 1024, 1, 1), float32] /
} / ty=fn (Tensor[(1, 1024, 7, 7), float32]) → Tensor[(1, 1024, 1, 1), float32] /;
%138 = %137(%136) / ty=Tensor[(1, 1024, 1, 1), float32] /;
%139 = fn (%FunctionVar_0_01: Tensor[(1, 1024, 1, 1), float32] / ty=Tensor[(1, 1024, 1, 1), float32] /, PartitionedFromPattern=“nn.batch_flatten_”, Composite=“clml.batch_flatten”) → Tensor[(1, 1024), float32] {
nn.batch_flatten(%FunctionVar_0_01) / ty=Tensor[(1, 1024), float32] /
} / ty=fn (Tensor[(1, 1024, 1, 1), float32]) → Tensor[(1, 1024), float32] /;
%140 = %139(%138) / ty=Tensor[(1, 1024), float32] /;
%141 = fn (%FunctionVar_0_0: Tensor[(1, 1024), float32] / ty=Tensor[(1, 1024), float32] /, PartitionedFromPattern=“nn.dense_”, Composite=“clml.dense”) → Tensor[(1, 1000), float32] {
nn.dense(%FunctionVar_0_0, meta[relay.Constant][1] / ty=Tensor[(1000, 1024), float32] /, units=1000) / ty=Tensor[(1, 1000), float32] /
} / ty=fn (Tensor[(1, 1024), float32]) → Tensor[(1, 1000), float32] /;
%141(%140) / ty=Tensor[(1, 1000), float32] */
}
def @ tvmgen_default_clml_main_1(%clml_1_i0: Tensor[(1, 1000), float32] /* ty=Tensor[(1, 1000), float32] /, Compiler=“clml”, Primitive=1, Inline=1, global_symbol=“tvmgen_default_clml_main_1”) → Tensor[(1, 1000), float32] {
%142 = fn (%FunctionVar_0_04: Tensor[(1, 1000), float32] / ty=Tensor[(1, 1000), float32] /, PartitionedFromPattern=“nn.softmax_”, Composite=“clml.softmax”) → Tensor[(1, 1000), float32] {
nn.softmax(%FunctionVar_0_04) / ty=Tensor[(1, 1000), float32] /
} / ty=fn (Tensor[(1, 1000), float32]) → Tensor[(1, 1000), float32] /;
%142(%clml_1_i0) / ty=Tensor[(1, 1000), float32] */
}