Some ops from Torchscript call for support

I am using PyTorch 1.9.0 and Python 3.6.

I am working on a PyTorch project, and I found that the following ops from TorchScript are not yet supported by the TVM frontend:

'aten::dim', 'prim::dtype', 'prim::unchecked_cast', 'aten::append', 'aten::warn', 'prim::TupleIndex', 'prim::Uninitialized', 'aten::copy_', 'aten::__is__', 'aten::format', 'aten::item', 'aten::__derive_index', 'aten::list', 'prim::data'

Part of my TorchScript graph is shown below (truncated to fit the 40000-character text limit):

graph(%self.1 : __torch__.decoders.ScriptGreedyDecoder,
      %x.1 : Tensor,
      %out_lens.1 : Tensor):
  %3 : NoneType = prim::Constant()
  %4 : bool = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:66:8
  %5 : int = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:66:43
  %6 : int = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:67:54
  %7 : __torch__.model_separable_rnnt.RNNT = prim::GetAttr[name="_model"](%self.1)
  %8 : __torch__.model_separable_rnnt.___torch_mangle_4.Encoder = prim::GetAttr[name="encoder"](%7)
  %23 : bool = prim::Constant[value=0]() # :0:0
  %24 : bool = prim::Constant[value=1]() # /python3.6/site-packages/torch/nn/modules/rnn.py:679:61
  %25 : NoneType = prim::Constant() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:83:45
  %26 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %27 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %28 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %29 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %30 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %31 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %32 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %33 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %34 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %35 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %36 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %37 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %38 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %39 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %40 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %41 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %42 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %43 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %44 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %45 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
  %46 : str = prim::Constant[value="Expected hidden[1] size {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:624:31
  %47 : str = prim::Constant[value="Expected hidden[0] size {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:622:31
  %48 : str = prim::Constant[value="input.size(-1) must be equal to input_size. Expected {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
  %49 : str = prim::Constant[value="input must have {} dimensions, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
  %50 : int = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:88:41
  %51 : int = prim::Constant[value=2]() # :0:0
  %52 : int = prim::Constant[value=1024]() # /python3.6/site-packages/torch/nn/modules/rnn.py:664:73
  %53 : int = prim::Constant[value=3]() # /python3.6/site-packages/torch/nn/modules/rnn.py:199:63
  %54 : int = prim::Constant[value=240]() # /python3.6/site-packages/torch/nn/modules/rnn.py:204:11
  %55 : int = prim::Constant[value=-1]() # /python3.6/site-packages/torch/nn/modules/rnn.py:204:41
  %56 : float = prim::Constant[value=0.32000000000000001]() # /python3.6/site-packages/torch/nn/modules/rnn.py:680:30
  %57 : int = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:88:38
  %58 : int = prim::Constant[value=6]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:105:28
  %59 : int = prim::Constant[value=2048]() # /python3.6/site-packages/torch/nn/modules/rnn.py:204:11
  %self_pre_rnn_lstm__flat_weights.1 : Tensor[] = prim::ListConstruct(%45, %44, %43, %42, %41, %40, %39, %38)
  %self_post_rnn_lstm__flat_weights.1 : Tensor[] = prim::ListConstruct(%37, %36, %35, %34, %33, %32, %31, %30, %29, %28, %27, %26)
  %max_batch_size.3 : int = aten::size(%x.1, %50) # /python3.6/site-packages/torch/nn/modules/rnn.py:658:68
  %63 : int = prim::dtype(%x.1)
  %64 : Device = prim::device(%x.1)
  %65 : int[] = prim::ListConstruct(%51, %max_batch_size.3, %52)
  %h_zeros.3 : Tensor = aten::zeros(%65, %63, %25, %64, %25) # /python3.6/site-packages/torch/nn/modules/rnn.py:665:22
  %67 : int = prim::dtype(%x.1)
  %68 : Device = prim::device(%x.1)
  %c_zeros.3 : Tensor = aten::zeros(%65, %67, %25, %68, %25) # /python3.6/site-packages/torch/nn/modules/rnn.py:668:22
  %70 : int = aten::dim(%x.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
  %71 : bool = aten::ne(%70, %53) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
   = prim::If(%71) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:8
    block0():
      %72 : int = aten::dim(%x.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:203:40
      %73 : str = aten::format(%49, %53, %72) # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
       = prim::RaiseException(%73) # /python3.6/site-packages/torch/nn/modules/rnn.py:201:12
      -> ()
    block1():
      -> ()
  %74 : int = aten::size(%x.1, %55) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:30
  %75 : bool = aten::ne(%54, %74) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:11
   = prim::If(%75) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:8
    block0():
      %76 : int = aten::size(%x.1, %55) # /python3.6/site-packages/torch/nn/modules/rnn.py:207:37
      %77 : str = aten::format(%48, %54, %76) # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
       = prim::RaiseException(%77) # /python3.6/site-packages/torch/nn/modules/rnn.py:205:12
      -> ()
    block1():
      -> ()
  %mini_batch.1 : int = aten::size(%x.1, %50) # /python3.6/site-packages/torch/nn/modules/rnn.py:213:64
  %expected_hidden_size.3 : (int, int, int) = prim::TupleConstruct(%51, %mini_batch.1, %52)
  %80 : int[] = aten::size(%h_zeros.3) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
  %81 : int[] = prim::ListConstruct(%51, %mini_batch.1, %52)
  %82 : bool = aten::ne(%80, %81) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
   = prim::If(%82) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
    block0():
      %83 : int[] = aten::size(%h_zeros.3) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
      %84 : int[] = aten::list(%83) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
      %85 : str = aten::format(%47, %expected_hidden_size.3, %84) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
       = prim::RaiseException(%85) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
      -> ()
    block1():
      -> ()
  %mini_batch0.1 : int = aten::size(%x.1, %50) # /python3.6/site-packages/torch/nn/modules/rnn.py:607:64
  %expected_hidden_size0.1 : (int, int, int) = prim::TupleConstruct(%51, %mini_batch0.1, %52)
  %88 : int[] = aten::size(%c_zeros.3) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
  %89 : int[] = prim::ListConstruct(%51, %mini_batch0.1, %52)
  %90 : bool = aten::ne(%88, %89) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
   = prim::If(%90) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
    block0():
      %91 : int[] = aten::size(%c_zeros.3) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
      %92 : int[] = aten::list(%91) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
      %93 : str = aten::format(%46, %expected_hidden_size0.1, %92) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
       = prim::RaiseException(%93) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
      -> ()
    block1():
      -> ()
  %94 : Tensor[] = prim::ListConstruct(%h_zeros.3, %c_zeros.3)
  %95 : Tensor, %96 : Tensor, %97 : Tensor = aten::lstm(%x.1, %94, %self_pre_rnn_lstm__flat_weights.1, %24, %51, %56, %23, %23, %23) # /python3.6/site-packages/torch/nn/modules/rnn.py:679:21
  %98 : Tensor = prim::data(%95)
  %99 : Tensor = aten::dropout_(%98, %56, %23) # /python3.6/site-packages/torch/nn/functional.py:1168:11
  %seq.1 : Tensor[] = prim::ListConstruct(%95)
   = prim::Loop(%50, %24) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:100:8
    block0(%101 : int):
      %i.1 : int = aten::__derive_index(%101, %50, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:100:8
      %tmp.1 : Tensor = aten::zeros_like(%95, %25, %25, %25, %25, %25) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:102:18
      %104 : Tensor = aten::slice(%95, %57, %i.1, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:29
      %105 : Tensor = aten::slice(%104, %50, %25, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:29
      %106 : Tensor = aten::slice(%105, %51, %25, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:29
      %107 : int = aten::neg(%i.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:17
      %108 : Tensor = aten::slice(%tmp.1, %57, %25, %107, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:12
      %109 : Tensor = aten::slice(%108, %50, %25, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:12
      %110 : Tensor = aten::slice(%109, %51, %25, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:12
      %111 : Tensor = aten::copy_(%110, %106, %23) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:103:12
      %112 : Tensor[] = aten::append(%seq.1, %tmp.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:104:12
      -> (%24)
  %113 : Tensor = aten::to(%out_lens.1, %58, %23, %23, %25) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:105:28
  %114 : Tensor = aten::div(%113, %51) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:105:28
  %115 : Tensor = aten::ceil(%114) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:105:17
  %x_lens0.1 : Tensor = aten::to(%115, %53, %23, %23, %25) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:105:17
  %117 : Tensor = aten::cat(%seq.1, %51) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:107:15
  %118 : Tensor = aten::slice(%117, %57, %25, %25, %51) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:107:15
  %119 : Tensor = aten::slice(%118, %50, %25, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:107:15
  %120 : Tensor = aten::slice(%119, %51, %25, %25, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/rnn.py:107:15
  %max_batch_size0.1 : int = aten::size(%120, %50) # /python3.6/site-packages/torch/nn/modules/rnn.py:658:68
  %122 : int = prim::dtype(%120)
  %123 : Device = prim::device(%120)
  %124 : int[] = prim::ListConstruct(%53, %max_batch_size0.1, %52)
  %h_zeros0.1 : Tensor = aten::zeros(%124, %122, %25, %123, %25) # /python3.6/site-packages/torch/nn/modules/rnn.py:665:22
  %126 : int = prim::dtype(%120)
  %127 : Device = prim::device(%120)
  %c_zeros0.1 : Tensor = aten::zeros(%124, %126, %25, %127, %25) # /python3.6/site-packages/torch/nn/modules/rnn.py:668:22
  %129 : int = aten::dim(%120) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
  %130 : bool = aten::ne(%129, %53) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
   = prim::If(%130) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:8
    block0():
      %131 : int = aten::dim(%120) # /python3.6/site-packages/torch/nn/modules/rnn.py:203:40
      %132 : str = aten::format(%49, %53, %131) # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
       = prim::RaiseException(%132) # /python3.6/site-packages/torch/nn/modules/rnn.py:201:12
      -> ()
    block1():
      -> ()
  %133 : int = aten::size(%120, %55) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:30
  %134 : bool = aten::ne(%59, %133) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:11
   = prim::If(%134) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:8
    block0():
      %135 : int = aten::size(%120, %55) # /python3.6/site-packages/torch/nn/modules/rnn.py:207:37
      %136 : str = aten::format(%48, %59, %135) # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
       = prim::RaiseException(%136) # /python3.6/site-packages/torch/nn/modules/rnn.py:205:12
      -> ()
    block1():
      -> ()
  %mini_batch1.1 : int = aten::size(%120, %50) # /python3.6/site-packages/torch/nn/modules/rnn.py:213:64
  %expected_hidden_size1.1 : (int, int, int) = prim::TupleConstruct(%53, %mini_batch1.1, %52)
  %139 : int[] = aten::size(%h_zeros0.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
  %140 : int[] = prim::ListConstruct(%53, %mini_batch1.1, %52)
  %141 : bool = aten::ne(%139, %140) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
   = prim::If(%141) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
    block0():
      %142 : int[] = aten::size(%h_zeros0.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
      %143 : int[] = aten::list(%142) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
      %144 : str = aten::format(%47, %expected_hidden_size1.1, %143) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
       = prim::RaiseException(%144) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
      -> ()
    block1():
      -> ()
  %mini_batch2.1 : int = aten::size(%120, %50) # /python3.6/site-packages/torch/nn/modules/rnn.py:607:64
  %expected_hidden_size2.1 : (int, int, int) = prim::TupleConstruct(%53, %mini_batch2.1, %52)
  %147 : int[] = aten::size(%c_zeros0.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
  %148 : int[] = prim::ListConstruct(%53, %mini_batch2.1, %52)
  %149 : bool = aten::ne(%147, %148) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
   = prim::If(%149) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
    block0():
      %150 : int[] = aten::size(%c_zeros0.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
      %151 : int[] = aten::list(%150) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
      %152 : str = aten::format(%46, %expected_hidden_size2.1, %151) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
       = prim::RaiseException(%152) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
      -> ()
    block1():
      -> ()
  %153 : Tensor[] = prim::ListConstruct(%h_zeros0.1, %c_zeros0.1)
  %154 : Tensor, %155 : Tensor, %156 : Tensor = aten::lstm(%120, %153, %self_post_rnn_lstm__flat_weights.1, %24, %53, %56, %23, %23, %23) # /python3.6/site-packages/torch/nn/modules/rnn.py:679:21
  %157 : Tensor = prim::data(%154)
  %158 : Tensor = aten::dropout_(%157, %56, %23) # /python3.6/site-packages/torch/nn/functional.py:1168:11
  %x_padded0.1 : Tensor = aten::transpose(%154, %57, %50) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:88:19
  %160 : (Tensor, Tensor) = prim::TupleConstruct(%x_padded0.1, %x_lens0.1)
  %logits.2 : Tensor, %logits_lens.1 : Tensor = prim::TupleUnpack(%160)
  %output.1 : int[][] = prim::ListConstruct()
  %13 : int = aten::size(%logits.2, %5) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:66:31
   = prim::Loop(%13, %4) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:66:8
    block0(%batch_idx.1 : int):
      %15 : Tensor = aten::select(%logits.2, %5, %batch_idx.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:67:20
      %16 : Tensor = aten::slice(%15, %5, %3, %3, %6) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:67:20
      %17 : Tensor = aten::slice(%16, %6, %3, %3, %6) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:67:20
      %inseq.1 : Tensor = aten::unsqueeze(%17, %6) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:67:20
      %logitlen.1 : Tensor = aten::select(%logits_lens.1, %5, %batch_idx.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:69:23
      %161 : bool = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:89:60
      %162 : bool = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:78:8
      %hidden.11 : NoneType = prim::Constant() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:76:62
      %164 : int = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:79:44
      %165 : int = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:100:33
      %166 : int = prim::Constant[value=9223372036854775807]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:84:12
      %sentence.1 : int[] = prim::ListConstruct()
      %168 : Scalar = aten::item(%logitlen.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:78:34
      %169 : int = aten::Int(%168) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:78:30
      %hidden : (Tensor, Tensor)? = prim::Loop(%169, %162, %hidden.11) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:78:8
        block0(%time_idx.1 : int, %hidden.9 : (Tensor, Tensor)?):
          %173 : Tensor = aten::select(%inseq.1, %164, %time_idx.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:79:16
          %174 : Tensor = aten::slice(%173, %164, %hidden.11, %hidden.11, %165) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:79:16
          %175 : Tensor = aten::slice(%174, %165, %hidden.11, %hidden.11, %165) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:79:16
          %f.1 : Tensor = aten::unsqueeze(%175, %164) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:79:16
          %177 : int = prim::GetAttr[name="_max_symbols_per_step"](%self.1)
          %178 : bool = aten::lt(%164, %177) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:84:32
          %hidden0 : (Tensor, Tensor)?, %not_blank : bool, %symbols_added : int = prim::Loop(%166, %178, %hidden.9, %162, %164) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:84:12
            block0(%182 : int, %hidden0.9 : (Tensor, Tensor)?, %not_blank.7 : bool, %symbols_added.5 : int):
              %186 : int = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:122:43
              %187 : int = prim::Constant[value=-1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:122:57
              %188 : int = aten::len(%sentence.1) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:122:28
              %189 : bool = aten::eq(%188, %186) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:122:28
              %190 : int = prim::If(%189) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:122:15
                block0():
                  %191 : int = prim::GetAttr[name="_SOS"](%self.1)
                  -> (%191)
                block1():
                  %192 : int = aten::__getitem__(%sentence.1, %187) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:122:50
                  -> (%192)
              %193 : bool = prim::Constant[value=0]()
              %194 : NoneType = prim::Constant() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:106:42
              %195 : int = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:108:21
              %196 : int = prim::Constant[value=4]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:109:46
              %197 : int = prim::GetAttr[name="_SOS"](%self.1)
              %198 : bool = aten::eq(%190, %197) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:105:11
              %199 : (Tensor, (Tensor, Tensor)) = prim::If(%198) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:105:8
                block0():
                  %200 : __torch__.model_separable_rnnt.RNNT = prim::GetAttr[name="_model"](%self.1)
                  %201 : __torch__.model_separable_rnnt.___torch_mangle_13.Prediction = prim::GetAttr[name="prediction"](%200)
                  %202 : bool = prim::Constant[value=1]() # /python3.6/site-packages/torch/nn/modules/rnn.py:679:61
                  %203 : bool = prim::Constant[value=0]()
                  %204 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %y0.2 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %206 : str = prim::Constant[value="AssertionError: "]() # :0:0
                  %207 : NoneType = prim::Constant() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:126:16
                  %208 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %209 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %210 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %211 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %212 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %213 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %214 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %215 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %216 : str = prim::Constant[value="Expected hidden[1] size {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:624:31
                  %217 : str = prim::Constant[value="Expected hidden[0] size {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:622:31
                  %218 : str = prim::Constant[value="input.size(-1) must be equal to input_size. Expected {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
                  %219 : str = prim::Constant[value="input must have {} dimensions, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
                  %220 : int = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:144:24
                  %221 : int = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:131:16
                  %222 : int = prim::Constant[value=2]() # :0:0
                  %223 : int = prim::Constant[value=320]() # :0:0
                  %224 : int = prim::Constant[value=3]() # /python3.6/site-packages/torch/nn/modules/rnn.py:199:63
                  %225 : int = prim::Constant[value=-1]() # /python3.6/site-packages/torch/nn/functional.py:2030:22
                  %226 : float = prim::Constant[value=0.32000000000000001]() # /python3.6/site-packages/torch/nn/modules/rnn.py:680:30
                  %self_dec_rnn_lstm__flat_weights.2 : Tensor[] = prim::ListConstruct(%215, %214, %213, %212, %211, %210, %209, %208)
                  %228 : (Tensor, Tensor)? = prim::Uninitialized() # :0:0
                  %229 : bool = aten::__is__(%194, %207) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:126:11
                  %y0.4 : Tensor, %state0.1 : (Tensor, Tensor)? = prim::If(%229) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:126:8
                    block0():
                      %232 : bool = aten::__is__(%hidden0.9, %207) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:129:19
                      %state1.1 : (Tensor, Tensor)? = prim::If(%232) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:129:12
                        block0():
                          -> (%hidden0.9)
                        block1():
                           = prim::RaiseException(%206) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:129:12
                          -> (%228)
                      -> (%y0.2, %state1.1)
                    block1():
                      %y1.2 : Tensor = prim::unchecked_cast(%194)
                      %y2.2 : Tensor = aten::embedding(%204, %y1.2, %225, %203, %203) # /python3.6/site-packages/torch/nn/functional.py:2043:11
                      -> (%y2.2, %hidden0.9)
                  %y3.2 : Tensor = aten::transpose(%y0.4, %220, %221) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:144:12
                  %max_batch_size.2 : int = aten::size(%y3.2, %221) # /python3.6/site-packages/torch/nn/modules/rnn.py:658:68
                  %238 : bool = aten::__is__(%state0.1, %207) # /python3.6/site-packages/torch/nn/modules/rnn.py:662:11
                  %hx.2 : (Tensor, Tensor) = prim::If(%238) # /python3.6/site-packages/torch/nn/modules/rnn.py:662:8
                    block0():
                      %240 : int = prim::dtype(%y3.2)
                      %241 : Device = prim::device(%y3.2)
                      %242 : int[] = prim::ListConstruct(%222, %max_batch_size.2, %223)
                      %h_zeros.2 : Tensor = aten::zeros(%242, %240, %207, %241, %207) # /python3.6/site-packages/torch/nn/modules/rnn.py:665:22
                      %c_zeros.2 : Tensor = aten::zeros(%242, %240, %207, %241, %207) # /python3.6/site-packages/torch/nn/modules/rnn.py:668:22
                      %hx.4 : (Tensor, Tensor) = prim::TupleConstruct(%h_zeros.2, %c_zeros.2)
                      -> (%hx.4)
                    block1():
                      %hx0.2 : (Tensor, Tensor) = prim::unchecked_cast(%state0.1)
                      -> (%hx0.2)
                  %247 : int = aten::dim(%y3.2) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
                  %248 : bool = aten::ne(%247, %224) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
                   = prim::If(%248) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:8
                    block0():
                      %249 : str = aten::format(%219, %224, %247) # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
                       = prim::RaiseException(%249) # /python3.6/site-packages/torch/nn/modules/rnn.py:201:12
                      -> ()
                    block1():
                      -> ()
                  %250 : int = aten::size(%y3.2, %225) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:30
                  %251 : bool = aten::ne(%223, %250) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:11
                   = prim::If(%251) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:8
                    block0():
                      %252 : str = aten::format(%218, %223, %250) # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
                       = prim::RaiseException(%252) # /python3.6/site-packages/torch/nn/modules/rnn.py:205:12
                      -> ()
                    block1():
                      -> ()
                  %253 : Tensor = prim::TupleIndex(%hx.2, %220)
                  %expected_hidden_size.2 : (int, int, int) = prim::TupleConstruct(%222, %max_batch_size.2, %223)
                  %255 : int[] = aten::size(%253) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                  %256 : int[] = prim::ListConstruct(%222, %max_batch_size.2, %223)
                  %257 : bool = aten::ne(%255, %256) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                   = prim::If(%257) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
                    block0():
                      %258 : int[] = aten::size(%253) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
                      %259 : int[] = aten::list(%258) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
                      %260 : str = aten::format(%217, %expected_hidden_size.2, %259) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
                       = prim::RaiseException(%260) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
                      -> ()
                    block1():
                      -> ()
                  %261 : Tensor = prim::TupleIndex(%hx.2, %221)
                  %262 : int[] = aten::size(%261) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                  %263 : bool = aten::ne(%262, %256) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                   = prim::If(%263) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
                    block0():
                      %264 : int[] = aten::size(%261) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
                      %265 : int[] = aten::list(%264) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
                      %266 : str = aten::format(%216, %expected_hidden_size.2, %265) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
                       = prim::RaiseException(%266) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
                      -> ()
                    block1():
                      -> ()
                  %267 : Tensor, %268 : Tensor = prim::TupleUnpack(%hx.2)
                  %269 : Tensor[] = prim::ListConstruct(%267, %268)
                  %270 : Tensor, %271 : Tensor, %272 : Tensor = aten::lstm(%y3.2, %269, %self_dec_rnn_lstm__flat_weights.2, %202, %222, %226, %203, %203, %203) # /python3.6/site-packages/torch/nn/modules/rnn.py:679:21
                  %hidden.7 : (Tensor, Tensor) = prim::TupleConstruct(%271, %272)
                  %274 : Tensor = prim::data(%270)
                  %275 : Tensor = aten::dropout_(%274, %226, %203) # /python3.6/site-packages/torch/nn/functional.py:1168:11
                  %g.2 : Tensor = aten::transpose(%270, %220, %221) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:146:12
                  %277 : (Tensor, (Tensor, Tensor)) = prim::TupleConstruct(%g.2, %hidden.7)
                  -> (%277)
                block1():
                  %278 : int = prim::GetAttr[name="_blank_id"](%self.1)
                  %279 : bool = aten::gt(%190, %278) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:107:11
                  %label0 : int = prim::If(%279) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:107:8
                    block0():
                      %label0.1 : int = aten::sub(%190, %195) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:108:12
                      -> (%label0.1)
                    block1():
                      -> (%190)
                  %282 : int[] = prim::ListConstruct(%label0)
                  %283 : int[][] = prim::ListConstruct(%282)
                  %label1.1 : Tensor = aten::tensor(%283, %196, %194, %193) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/decoders.py:109:16
                  %285 : __torch__.model_separable_rnnt.RNNT = prim::GetAttr[name="_model"](%self.1)
                  %286 : __torch__.model_separable_rnnt.___torch_mangle_13.Prediction = prim::GetAttr[name="prediction"](%285)
                  %287 : bool = prim::Constant[value=1]() # /python3.6/site-packages/torch/nn/modules/rnn.py:679:61
                  %288 : bool = prim::Constant[value=0]()
                  %289 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %y0.1 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %291 : str = prim::Constant[value="AssertionError: "]() # :0:0
                  %292 : NoneType = prim::Constant() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:126:16
                  %293 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %294 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %295 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %296 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %297 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %298 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %299 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %300 : Tensor = prim::Constant[value=<Tensor>]() # :0:0
                  %301 : str = prim::Constant[value="Expected hidden[1] size {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:624:31
                  %302 : str = prim::Constant[value="Expected hidden[0] size {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:622:31
                  %303 : str = prim::Constant[value="input.size(-1) must be equal to input_size. Expected {}, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
                  %304 : str = prim::Constant[value="input must have {} dimensions, got {}"]() # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
                  %305 : int = prim::Constant[value=0]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:144:24
                  %306 : int = prim::Constant[value=1]() # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:131:16
                  %307 : int = prim::Constant[value=2]() # :0:0
                  %308 : int = prim::Constant[value=320]() # :0:0
                  %309 : int = prim::Constant[value=3]() # /python3.6/site-packages/torch/nn/modules/rnn.py:199:63
                  %310 : int = prim::Constant[value=-1]() # /python3.6/site-packages/torch/nn/functional.py:2030:22
                  %311 : float = prim::Constant[value=0.32000000000000001]() # /python3.6/site-packages/torch/nn/modules/rnn.py:680:30
                  %self_dec_rnn_lstm__flat_weights.1 : Tensor[] = prim::ListConstruct(%300, %299, %298, %297, %296, %295, %294, %293)
                  %313 : (Tensor, Tensor)? = prim::Uninitialized() # :0:0
                  %314 : bool = aten::__is__(%label1.1, %292) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:126:11
                  %y0 : Tensor, %state0 : (Tensor, Tensor)? = prim::If(%314) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:126:8
                    block0():
                      %317 : bool = aten::__is__(%hidden0.9, %292) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:129:19
                      %state1 : (Tensor, Tensor)? = prim::If(%317) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:129:12
                        block0():
                          -> (%hidden0.9)
                        block1():
                           = prim::RaiseException(%291) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:129:12
                          -> (%313)
                      -> (%y0.1, %state1)
                    block1():
                      %y1.1 : Tensor = prim::unchecked_cast(%label1.1)
                      %y2.1 : Tensor = aten::embedding(%289, %y1.1, %310, %288, %288) # /python3.6/site-packages/torch/nn/functional.py:2043:11
                      -> (%y2.1, %hidden0.9)
                  %y3.1 : Tensor = aten::transpose(%y0, %305, %306) # /mlperf-rnnt/inference/speech_recognition/rnnt/pytorch/model_separable_rnnt.py:144:12
                  %max_batch_size.1 : int = aten::size(%y3.1, %306) # /python3.6/site-packages/torch/nn/modules/rnn.py:658:68
                  %323 : bool = aten::__is__(%state0, %292) # /python3.6/site-packages/torch/nn/modules/rnn.py:662:11
                  %hx : (Tensor, Tensor) = prim::If(%323) # /python3.6/site-packages/torch/nn/modules/rnn.py:662:8
                    block0():
                      %325 : int = prim::dtype(%y3.1)
                      %326 : Device = prim::device(%y3.1)
                      %327 : int[] = prim::ListConstruct(%307, %max_batch_size.1, %308)
                      %h_zeros.1 : Tensor = aten::zeros(%327, %325, %292, %326, %292) # /python3.6/site-packages/torch/nn/modules/rnn.py:665:22
                      %c_zeros.1 : Tensor = aten::zeros(%327, %325, %292, %326, %292) # /python3.6/site-packages/torch/nn/modules/rnn.py:668:22
                      %hx.1 : (Tensor, Tensor) = prim::TupleConstruct(%h_zeros.1, %c_zeros.1)
                      -> (%hx.1)
                    block1():
                      %hx0.1 : (Tensor, Tensor) = prim::unchecked_cast(%state0)
                      -> (%hx0.1)
                  %332 : int = aten::dim(%y3.1) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
                  %333 : bool = aten::ne(%332, %309) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:11
                   = prim::If(%333) # /python3.6/site-packages/torch/nn/modules/rnn.py:200:8
                    block0():
                      %334 : str = aten::format(%304, %309, %332) # /python3.6/site-packages/torch/nn/modules/rnn.py:202:16
                       = prim::RaiseException(%334) # /python3.6/site-packages/torch/nn/modules/rnn.py:201:12
                      -> ()
                    block1():
                      -> ()
                  %335 : int = aten::size(%y3.1, %310) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:30
                  %336 : bool = aten::ne(%308, %335) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:11
                   = prim::If(%336) # /python3.6/site-packages/torch/nn/modules/rnn.py:204:8
                    block0():
                      %337 : str = aten::format(%303, %308, %335) # /python3.6/site-packages/torch/nn/modules/rnn.py:206:16
                       = prim::RaiseException(%337) # /python3.6/site-packages/torch/nn/modules/rnn.py:205:12
                      -> ()
                    block1():
                      -> ()
                  %338 : Tensor = prim::TupleIndex(%hx, %305)
                  %expected_hidden_size.1 : (int, int, int) = prim::TupleConstruct(%307, %max_batch_size.1, %308)
                  %340 : int[] = aten::size(%338) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                  %341 : int[] = prim::ListConstruct(%307, %max_batch_size.1, %308)
                  %342 : bool = aten::ne(%340, %341) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                   = prim::If(%342) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
                    block0():
                      %343 : int[] = aten::size(%338) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
                      %344 : int[] = aten::list(%343) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
                      %345 : str = aten::format(%302, %expected_hidden_size.1, %344) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
                       = prim::RaiseException(%345) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
                      -> ()
                    block1():
                      -> ()
                  %346 : Tensor = prim::TupleIndex(%hx, %306)
                  %347 : int[] = aten::size(%346) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                  %348 : bool = aten::ne(%347, %341) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:11
                   = prim::If(%348) # /python3.6/site-packages/torch/nn/modules/rnn.py:225:8
                    block0():
                      %349 : int[] = aten::size(%346) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:69
                      %350 : int[] = aten::list(%349) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:64
                      %351 : str = aten::format(%301, %expected_hidden_size.1, %350) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:31
                       = prim::RaiseException(%351) # /python3.6/site-packages/torch/nn/modules/rnn.py:226:12
                      -> ()
                    block1():
                      -> ()
                  %352 : Tensor, %353 : Tensor = prim::TupleUnpack(%hx)
                  %354 : Tensor[] = prim::ListConstruct(%352, %353)
                  %355 : Tensor, %356 : Tensor, %357 : Tensor = aten::lstm(%y3.1, %354, %self_dec_rnn_lstm__flat_weights.1, %287, %307, %311, %288, %288, %288) # /python3.6/site-packages/torch/nn/modules/rnn.py:679:21
                  %hidden.1 : (Tensor, Tensor) = prim::TupleConstruct(%356, %357)

Please use torch.jit.trace instead of torch.jit.script — traced graphs avoid the control-flow and container ops (prim::If, prim::Loop, aten::append, etc.) that the TVM frontend does not yet support.

1 Like

This model contains data-dependent control flow (the greedy decoder loop), so it cannot be exported with torch.jit.trace — tracing only records a single execution path.