[Relay] Why does dcgan (in relay.testing) have a (0, 100) shaped input?

See the type of %data in the dump below; the leading 0 then propagates through every intermediate tensor in the network:

v0.0.3
def @main(%data: Tensor[(0, 100), float32], %dense_weight: Tensor[(16384, 100), float32], %g2_deconv_weight: Tensor[(1024, 512, 4, 4), float32], %g2_batch_norm_gamma: Tensor[(512), float32], %g2_batch_norm_beta: Tensor[(512), float32], %g2_batch_norm_moving_mean: Tensor[(512), float32], %g2_batch_norm_moving_var: Tensor[(512), float32], %g3_deconv_weight: Tensor[(512, 256, 4, 4), float32], %g3_batch_norm_gamma: Tensor[(256), float32], %g3_batch_norm_beta: Tensor[(256), float32], %g3_batch_norm_moving_mean: Tensor[(256), float32], %g3_batch_norm_moving_var: Tensor[(256), float32], %g4_deconv_weight: Tensor[(256, 128, 4, 4), float32], %g4_batch_norm_gamma: Tensor[(128), float32], %g4_batch_norm_beta: Tensor[(128), float32], %g4_batch_norm_moving_mean: Tensor[(128), float32], %g4_batch_norm_moving_var: Tensor[(128), float32], %g5_deconv_weight: Tensor[(128, 3, 4, 4), float32]) -> Tensor[(0, 3, 64, 64), float32] {
  %0 = nn.dense(%data, %dense_weight, units=16384) /* ty=Tensor[(0, 16384), float32] */;
  %1 = nn.relu(%0) /* ty=Tensor[(0, 16384), float32] */;
  %2 = reshape(%1, newshape=[-1, 1024, 4, 4]) /* ty=Tensor[(0, 1024, 4, 4), float32] */;
  %3 = nn.conv2d_transpose(%2, %g2_deconv_weight, channels=512, kernel_size=[4, 4], strides=[2, 2], padding=[1, 1]) /* ty=Tensor[(0, 512, 8, 8), float32] */;
  %4 = nn.batch_norm(%3, %g2_batch_norm_gamma, %g2_batch_norm_beta, %g2_batch_norm_moving_mean, %g2_batch_norm_moving_var, epsilon=1e-05f, scale=False) /* ty=(Tensor[(0, 512, 8, 8), float32], Tensor[(512), float32], Tensor[(512), float32]) */;
  %5 = %4.0;
  %6 = nn.relu(%5) /* ty=Tensor[(0, 512, 8, 8), float32] */;
  %7 = nn.conv2d_transpose(%6, %g3_deconv_weight, channels=256, kernel_size=[4, 4], strides=[2, 2], padding=[1, 1]) /* ty=Tensor[(0, 256, 16, 16), float32] */;
  %8 = nn.batch_norm(%7, %g3_batch_norm_gamma, %g3_batch_norm_beta, %g3_batch_norm_moving_mean, %g3_batch_norm_moving_var, epsilon=1e-05f, scale=False) /* ty=(Tensor[(0, 256, 16, 16), float32], Tensor[(256), float32], Tensor[(256), float32]) */;
  %9 = %8.0;
  %10 = nn.relu(%9) /* ty=Tensor[(0, 256, 16, 16), float32] */;
  %11 = nn.conv2d_transpose(%10, %g4_deconv_weight, channels=128, kernel_size=[4, 4], strides=[2, 2], padding=[1, 1]) /* ty=Tensor[(0, 128, 32, 32), float32] */;
  %12 = nn.batch_norm(%11, %g4_batch_norm_gamma, %g4_batch_norm_beta, %g4_batch_norm_moving_mean, %g4_batch_norm_moving_var, epsilon=1e-05f, scale=False) /* ty=(Tensor[(0, 128, 32, 32), float32], Tensor[(128), float32], Tensor[(128), float32]) */;
  %13 = %12.0;
  %14 = nn.relu(%13) /* ty=Tensor[(0, 128, 32, 32), float32] */;
  %15 = nn.conv2d_transpose(%14, %g5_deconv_weight, channels=3, kernel_size=[4, 4], strides=[2, 2], padding=[1, 1]) /* ty=Tensor[(0, 3, 64, 64), float32] */;
  tanh(%15) /* ty=Tensor[(0, 3, 64, 64), float32] */
}
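
For reference, here is a minimal way to reproduce this dump (a sketch: the batch_size=0 argument is my guess, inferred from the leading 0 in every shape annotation; random_len defaults to 100, which matches the second dimension of %data):

    from tvm.relay import testing

    # Build the dcgan testing workload. batch_size=0 is assumed here;
    # it yields the (0, 100) data shape and the 0 batch dimension that
    # type inference then threads through every op in the dump above.
    mod, params = testing.dcgan.get_workload(batch_size=0)
    print(mod)

If I am reading python/tvm/relay/testing/dcgan.py correctly, the input is declared as relay.var("data", shape=(batch_size, random_len)), so calling get_workload(batch_size=1) instead produces the Tensor[(1, 100), float32] input one would normally expect.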