tvm.parser.fromtext() results in Segmentation Fault

Hi everybody,

I’m using “tvm.parser.fromtext()” to parse Relay code, but for the Relay module shown below the call crashes with a segmentation fault instead of raising a Python error. Does anybody know the reason?
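
For reference, a trivial module without any meta[...] references is the kind of input I would expect the same call to accept; the sketch below is made up just for illustration:

import tvm

# Minimal Relay module: no meta[...] entries, no dynamic ops.
simple = '''
#[version = "0.0.5"]
def @main(%x: Tensor[(2, 2), float32]) {
  add(%x, %x)
}
'''
mod = tvm.parser.fromtext(simple)  # expected to parse cleanly
print(mod)

The snippet that triggers the crash: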

....
expr = '''
#[version = "0.0.5"]
def @main(%X: Tensor[(32, 144, 963), float32], %padding_masks: Tensor[(32, 144), bool], %project_inp_bias: Tensor[(128), float32], %pos_enc_pe: Tensor[(144, 1, 128), float32], %transformer_encoder_layers_0_self_attn_in_proj_bias: Tensor[(384), float32], %transformer_encoder_layers_0_self_attn_out_proj_bias: Tensor[(128), float32], %transformer_encoder_layers_0_linear1_bias: Tensor[(512), float32], %transformer_encoder_layers_0_linear2_bias: Tensor[(128), float32], %transformer_encoder_layers_0_norm1_weight: Tensor[(128), float32], %transformer_encoder_layers_0_norm1_bias: Tensor[(128), float32], %transformer_encoder_layers_0_norm1_running_mean: Tensor[(128), float32], %transformer_encoder_layers_0_norm1_running_var: Tensor[(128), float32], %transformer_encoder_layers_0_norm2_weight: Tensor[(128), float32], %transformer_encoder_layers_0_norm2_bias: Tensor[(128), float32], %transformer_encoder_layers_0_norm2_running_mean: Tensor[(128), float32], %transformer_encoder_layers_0_norm2_running_var: Tensor[(128), float32], %output_layer_weight: Tensor[(7, ?), float32], %output_layer_bias: Tensor[(7), float32], %v168: Tensor[(963, 128), float32], %v169: Tensor[(1), int64], %v170: Tensor[(1), int64], %v171: Tensor[(128, 384), float32], %v172: Tensor[(1), int64], %v173: Tensor[(1), int64], %v174: Tensor[(1), int64], %v175: Tensor[(128, 128), float32], %v176: Tensor[(128, 512), float32], %v177: Tensor[(512, 128), float32], %v178: Tensor[(1), int64]) {
  %0 = transpose(%X, axes=[1, 0, 2]);
  %1 = reshape(%0, newshape=[-1, 32, 963]);
  %2 = reshape(%v168, newshape=[-1, 963, 128]);
  %3 = transpose(%2, axes=[0, 2, 1]);
  %4 = nn.batch_matmul(%1, %3, meta[relay.attrs.BatchMatmulAttrs][0]);
  %5 = reshape(%4, newshape=[144, 32, 128]);
  %6 = add(%5, %project_inp_bias);
  %7 = cast(11.3138f /* ty=float32 */, dtype="float32");
  %8 = multiply(%6, %7);
  %9 = scatter(meta[relay.Constant][0] /* ty=Tensor[(3), int64] */, %v170, %v169, meta[relay.attrs.ScatterAttrs][0]);
  %10 = cast_like(0 /* ty=int32 */, %9);
  %11 = less(%9, %10);
  %12 = shape_of(%pos_enc_pe, dtype="int32");
  %13 = cast_like(%12, %9);
  %14 = add(%9, %13);
  %15 = where(%11, %14, %9);
  %16 = scatter(meta[relay.Constant][1] /* ty=Tensor[(3), int64] */, %v170, meta[relay.Constant][2] /* ty=Tensor[(1), int64] */, meta[relay.attrs.ScatterAttrs][1]);
  %17 = scatter(meta[relay.Constant][3] /* ty=Tensor[(3), int64] */, %v170, meta[relay.Constant][4] /* ty=Tensor[(1), int64] */, meta[relay.attrs.ScatterAttrs][2]);
  %18 = dyn.strided_slice(%pos_enc_pe, %15, %16, %17, begin=None, end=None, strides=None);
  %19 = add(%8, %18);
  %20 = logical_not(%padding_masks);
  %21 = expand_dims(%20, axis=1);
  %22 = expand_dims(%21, axis=2);
  %23 = cast(%22, dtype="bool");
  %24 = reshape(%19, newshape=[-1, 32, 128]);
  %25 = reshape(%v171, newshape=[-1, 128, 384]);
  %26 = transpose(%25, axes=[0, 2, 1]);
  %27 = nn.batch_matmul(%24, %26, meta[relay.attrs.BatchMatmulAttrs][1]);
  %28 = reshape(%27, newshape=[144, 32, 384]);
  %29 = add(%28, %transformer_encoder_layers_0_self_attn_in_proj_bias);
  %30 = split(%29, indices_or_sections=[128, 256], axis=-1);
  %31 = %30.0;
  %32 = cast(0.25f /* ty=float32 */, dtype="float32");
  %33 = multiply(%31, %32);
  %34 = reshape(%33, newshape=[144, 256, 16]);
  %35 = transpose(%34, axes=[1, 0, 2]);
  %36 = reshape(%35, newshape=[-1, 144, 16]);
  %37 = %30.1;
  %38 = (%v172, meta[relay.Constant][6] /* ty=Tensor[(1), int64] */, meta[relay.Constant][7] /* ty=Tensor[(1), int64] */);
  %39 = concatenate(%38);
  %40 = dyn.reshape(%37, %39, newshape=[]);
  %41 = transpose(%40, axes=[1, 2, 0]);
  %42 = shape_of(%41, dtype="int64");
  %43 = strided_slice(%42, begin=[1], end=[3], strides=[1]);
  %44 = (meta[relay.Constant][8] /* ty=Tensor[(1), int64] */, %43);
  %45 = concatenate(%44);
  %46 = dyn.reshape(%41, %45, newshape=[]);
  %47 = transpose(%46, axes=[0, 2, 1]);
  %48 = nn.batch_matmul(%36, %47, meta[relay.attrs.BatchMatmulAttrs][2]);
  %49 = strided_slice(%42, begin=[0], end=[1], strides=[1]);
  %50 = maximum(meta[relay.Constant][9] /* ty=Tensor[(1), int64] */, %49);
  %51 = (%50,);
  %52 = concatenate(%51);
  strided_slice(%42, begin=[2], end=[3], strides=[1])
}

'''
module = tvm.parser.fromtext(expr)
....
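
In case it helps to narrow this down, enabling Python’s built-in faulthandler before the call should at least dump the Python-level stack at the moment of the segfault (a generic debugging sketch, nothing TVM-specific):

import faulthandler
import tvm

faulthandler.enable()  # dump the Python traceback if the interpreter receives SIGSEGV
module = tvm.parser.fromtext(expr)  # expr is the Relay text quoted above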