Adding new relay operator as Pass

I want to add a new Relay operator and package it as a Relay pass, but when I execute it, I get a segmentation fault and the pass cannot run. Can someone help me? Thanks :heart:

When I use

custom_op0 = relay.op.get("add")

instead, it runs normally, as I expected.

import numpy as np
import tvm
import tvm.relay as relay


def example():
    """Build a small Relay function mixing conv2d, add and multiply nodes."""
    const_data = np.empty((1, 64, 54, 54)).astype("float32")
    c = relay.const(const_data)
    weight = relay.var("weight", shape=(64, 64, 3, 3))
    x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
    conv = relay.nn.conv2d(x, weight)
    # (c + c) * 2, then folded into the conv output.
    scaled = relay.multiply(relay.add(c, c), relay.const(2, "float32"))
    y = relay.add(conv, scaled)
    # Two structurally identical adds, summed together.
    z2 = relay.add(relay.add(y, c), relay.add(y, c))
    return relay.Function([x, weight], z2)

    
# NOTE(review): this is the cause of the reported segfault -- the op name
# "new_add" must be registered with the op registry BEFORE a compute is
# attached; otherwise relay.op.get("new_add") returns a broken handle.
relay.op.op.register("new_add", "Customized elementwise add.")


@relay.op.op.register_compute("new_add")
def new_add_compute(attrs, inputs, output_type):
    """FTVMCompute for "new_add": elementwise sum of the two inputs.

    An FTVMCompute callback receives (attrs, inputs, output_type) and must
    return a list of TE tensors.  The original version returned a Relay
    expression (`relay.add(data1, data2)`), which is not a valid compute.

    NOTE(review): for the op to survive type inference, a type relation and
    argument descriptions must also be registered (see the complete
    registration in the second snippet below) -- confirm before relying on
    this minimal fix alone.
    """
    return [
        tvm.te.compute(
            output_type.shape, lambda *i: inputs[0](*i) + inputs[1](*i)
        )
    ]


@relay.transform.function_pass(opt_level=3)
class CustomPipeline:
    """Function pass that rewrites every `add` call into `new_add`.

    The two operands are swapped during the rewrite; since addition is
    commutative, the computed result is unchanged.
    """

    def __init__(self):
        pass

    def transform_function(self, func, mod, ctx):

        class _RewriteAdd(tvm.relay.ExprMutator):
            def visit_call(self, call):
                # Leave every non-add call to the default mutator.
                if call.op.name != "add":
                    return super().visit_call(call)
                replacement = relay.Call(
                    relay.op.get("new_add"),
                    [call.args[1], call.args[0]],
                )
                # Recurse so the (unvisited) arguments get rewritten too.
                return super().visit_call(replacement)

        return _RewriteAdd().visit(func)


if __name__ == '__main__':
    # Build the example module, show it, run the custom pass, show the result.
    module = tvm.IRModule.from_expr(example())
    pipeline = CustomPipeline()
    print(module)
    rewritten = pipeline(module)
    print(rewritten)
1 Like

The generation of operators appears to be incorrect here, but I’m not sure where the issue is. Can someone help me with this? Thank you very much.

Take a look at this Python side relay operation register I like to use:

import numpy as np
import tvm
from tvm import relay, ir, target, te, topi, tir
from tvm.relay import reg
from tvm.relay.op.strategy import wrap_topi_schedule

def example():
    """Build a small Relay function mixing conv2d, add and multiply nodes."""
    const_data = np.empty((1, 64, 54, 54)).astype("float32")
    c = relay.const(const_data)
    weight = relay.var("weight", shape=(64, 64, 3, 3))
    x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
    conv = relay.nn.conv2d(x, weight)
    # (c + c) * 2, then folded into the conv output.
    scaled = relay.multiply(relay.add(c, c), relay.const(2, "float32"))
    y = relay.add(conv, scaled)
    # Two structurally identical adds, summed together.
    z2 = relay.add(relay.add(y, c), relay.add(y, c))
    return relay.Function([x, weight], z2)



def compute_new_add(attrs, inputs, output_type):
    """FTVMCompute for "new_add": elementwise sum of the two input tensors."""
    lhs, rhs = inputs[0], inputs[1]
    out = te.compute(
        output_type.shape, lambda *idx: lhs(*idx) + rhs(*idx), name="C"
    )
    return [out]

def rel_new_add(arg_types, attrs):
    """Type relation for "new_add": the output mirrors the first argument.

    NOTE(review): no broadcasting -- both operands are assumed to share the
    first argument's shape and dtype; confirm against callers.
    """
    lhs = arg_types[0]
    return relay.TensorType(lhs.shape, lhs.dtype)

@target.override_native_generic_func("strategy_new_add")
def strategy_new_add(attrs, inputs, out_type, target):
    """Generic op strategy binding compute_new_add to an extern schedule."""
    s = relay.op.OpStrategy()
    s.add_implementation(
        compute_new_add,
        wrap_topi_schedule(topi.generic.schedule_extern),
        name="new_add.generic",
    )
    return s

def register_new_add():
    """Register the custom "new_add" Relay op.

    Declares the op name, its type relation, both tensor arguments, the
    attrs type, the fusion pattern and the compute strategy -- the full set
    needed before relay.op.get("new_add") can be used safely.
    """
    name = "new_add"
    reg.register(name, "Customize Add Function.")
    new_op = reg.get(name)
    new_op.set_support_level(10)
    new_op.add_type_rel(name + "_rel", rel_new_add)
    new_op.add_argument("lhs", "Tensor", "The left hand side tensor.")
    new_op.add_argument("rhs", "Tensor", "The right hand side tensor.")
    new_op.set_attrs_type_key("DictAttrs")
    reg.register_pattern(name, relay.op.OpPattern.OUT_ELEMWISE_FUSABLE)
    reg.register_strategy(name, strategy_new_add)


register_new_add()



@relay.transform.function_pass(opt_level=3)
class CustomPipeline:
    """Function pass replacing every `add` call with the custom `new_add` op."""

    def __init__(self):
        pass

    def transform_function(self, func, mod, ctx):

        class _AddToNewAdd(tvm.relay.ExprMutator):
            def visit_call(self, call):
                # Only `add` calls are rewritten; everything else takes the
                # default mutation path.
                if call.op.name != "add":
                    return super().visit_call(call)
                print("find an add node")
                # Visit the arguments first so nested adds are rewritten too.
                new_args = [self.visit(arg) for arg in call.args]
                new_op = relay.op.get("new_add")
                print(new_op)
                return relay.Call(new_op, new_args, call.attrs, call.type_args)

        return _AddToNewAdd().visit(func)


if __name__ == '__main__':
    # Build the example module, show it, run the custom pass, show the result.
    module = tvm.IRModule.from_expr(example())
    pipeline = CustomPipeline()
    print(module)
    rewritten = pipeline(module)
    print(rewritten)
def @main(%x: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %weight: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */) -> Tensor[(1, 64, 54, 54), float32] {
  %0 = new_add(meta[relay.Constant][0] /* ty=Tensor[(1, 64, 54, 54), float32] */, meta[relay.Constant][0] /* ty=Tensor[(1, 64, 54, 54), float32] */) /* ty=Tensor[(1, 64, 54, 54), float32] */;
  %1 = nn.conv2d(%x, %weight, padding=[0, 0, 0, 0]) /* ty=Tensor[(1, 64, 54, 54), float32] */;
  %2 = multiply(%0, 2f /* ty=float32 */) /* ty=Tensor[(1, 64, 54, 54), float32] */;
  %3 = new_add(%1, %2) /* ty=Tensor[(1, 64, 54, 54), float32] */;
  %4 = new_add(%3, meta[relay.Constant][0] /* ty=Tensor[(1, 64, 54, 54), float32] */) /* ty=Tensor[(1, 64, 54, 54), float32] */;
  %5 = new_add(%3, meta[relay.Constant][0] /* ty=Tensor[(1, 64, 54, 54), float32] */) /* ty=Tensor[(1, 64, 54, 54), float32] */;
  new_add(%4, %5) /* ty=Tensor[(1, 64, 54, 54), float32] */
}

1 Like

Thank you! It helps — I think the issue is resolved. Thank you very much! But I'm still curious why my original code was not running.