AlterOpLayout doesn't work on nn.dense?

Hi, I’m trying to mimic the nn.conv2d unit test in test_pass_alter_op_layout.py and apply it to nn.dense, as shown below. The test registers an FTVMAlterOpLayout hook that multiplies the op’s weight input by a 2.0 scaling factor; the only difference between the two tests is the op itself (nn.conv2d vs. nn.dense).

from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
from tvm.relay.testing.temp_op_attr import TempOpAttr


def test_alter_dense():
    """Same as test_alter_op below, but with nn.dense instead of nn.conv2d."""

    def before():
        x = relay.var("x", shape=(1, 6, 6, 64))
        weight = relay.var("weight", shape=(32, 64))
        y = relay.nn.dense(x, weight)
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def alter_dense(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        # Scale the weight by 2.0 so the rewrite is visible in the output IR.
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.dense(data, weight, **attrs)

    with TempOpAttr("nn.dense", "FTVMAlterOpLayout", alter_dense):
        a = before()
        print(a)
        a = run_opt_pass(a, transform.AlterOpLayout())
        print(a)
        print('\n')

def test_alter_op():
    """Test directly replacing an operator with a new one"""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, weight, **attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        print(a)
        a = run_opt_pass(a, transform.AlterOpLayout())
        print(a)
        print('\n')
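
For completeness, the two tests are invoked from a main block along these lines (conv2d first, which matches the order of the output):

if __name__ == "__main__":
    test_alter_op()       # nn.conv2d case (first output below)
    test_alter_dense()    # nn.dense case (second output below)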

The output (conv2d test first, then dense) looks like this:

fn (%x: Tensor[(1, 64, 56, 56), float32], %weight: Tensor[(64, 64, 3, 3), float32]) {
  %0 = nn.conv2d(%x, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
  nn.relu(%0)
}
fn (%x: Tensor[(1, 64, 56, 56), float32], %weight: Tensor[(64, 64, 3, 3), float32]) -> Tensor[(1, 64, 56, 56), float32] {
  %0 = multiply(%weight, 2f /* ty=float32 */) /* ty=Tensor[(64, 64, 3, 3), float32] */;
  %1 = nn.conv2d(%x, %0, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  nn.relu(%1) /* ty=Tensor[(1, 64, 56, 56), float32] */
}


fn (%x: Tensor[(1, 6, 6, 64), float32], %weight: Tensor[(32, 64), float32]) {
  %0 = nn.dense(%x, %weight, units=None);
  nn.relu(%0)
}
fn (%x: Tensor[(1, 6, 6, 64), float32], %weight: Tensor[(32, 64), float32]) -> Tensor[(1, 6, 6, 32), float32] {
  %0 = nn.dense(%x, %weight, units=None) /* ty=Tensor[(1, 6, 6, 32), float32] */;
  nn.relu(%0) /* ty=Tensor[(1, 6, 6, 32), float32] */
}

To my surprise, the conv2d function is rewritten as expected, but the dense function doesn’t change at all. Can anyone help explain? @comaniac, maybe you have an insight here? Thanks.
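
One more data point that might help: a quick sanity check inside the with block (querying the op registry) should print the alter_dense callback rather than None if the registration itself took effect:

dense_op = relay.op.get("nn.dense")
print(dense_op.get_attr("FTVMAlterOpLayout"))  # expect the alter_dense callback, not None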