How to customize fusion ops?

import tvm
from tvm import relay

def example():
    x = relay.var("x", relay.TensorType((1, 3, 3, 1), "float32"))
    net = relay.nn.conv2d(x, relay.var("weight"),
                          channels=2,
                          kernel_size=(3, 3),
                          padding=(1, 1),
                          data_layout="NHWC")
    net = relay.nn.bias_add(net, relay.var("bias"), axis=3)  # bias along the NHWC channel axis
    net = relay.nn.relu(net)
    net = relay.add(net, relay.var("add_w", shape=[1, 3, 3, 2], dtype="float32"))
    net = relay.multiply(net, relay.var("mul_w", shape=[1, 3, 3, 2], dtype="float32"))
    net = relay.nn.softmax(net)
    return relay.Function([x], net)

f = example()
mod = tvm.relay.Module.from_expr(f)
seq = relay.transform.Sequential([
    relay.transform.SimplifyInference(),
    relay.transform.FoldConstant(),
    relay.transform.EliminateCommonSubexpr(),
    relay.transform.FoldScaleAxis(),
    relay.transform.FuseOps()])
with relay.build_config(opt_level=3):
    mod = seq(mod)
print(mod)

With the code above, conv2d, bias_add, relu, add, and multiply are all fused into one function by default. But I only want conv2d, bias_add, and relu to be fused, and I don't want add or multiply to be pulled into the fused group. Any good suggestions? Thank you!
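
One workaround I have been considering (a sketch only, assuming the register_pattern helper in tvm.relay.op.op, and that a registration at a higher level overrides the built-in one at the default level 10): FuseOps decides what to group by each operator's registered TOpPattern attribute, so re-registering add and multiply as OpPattern.OPAQUE should keep the fuser from merging them into any group.

from tvm.relay.op import op as _op

# Assumption: a re-registration at level 15 takes precedence over the
# default BROADCAST pattern registered for these ops at level 10.
_op.register_pattern("add", _op.OpPattern.OPAQUE, level=15)
_op.register_pattern("multiply", _op.OpPattern.OPAQUE, level=15)

Running the same Sequential pipeline after this should leave add and multiply as standalone calls while conv2d, bias_add, and relu still fuse. The downside is that it changes the pattern for add and multiply globally, for every model in the process, which feels heavy-handed; is there a more local way to control fusion?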