Error with relay.nn.conv2d: Entry function uses too much shared data

I’m trying to implement a float32 ViT with tvm.relay.nn functions. But when I build my relay.nn layers, I get the error Entry function 'tvmgen_default_fused_nn_conv2d_add_kernel0' uses too much shared data (0x3b7cc bytes, 0xc000 max). How can I solve this error?

My TVM version is v0.9.
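For reference, here is a simplified sketch of the build step; the "cuda" target and the 224x224 input shape are placeholders rather than my exact settings. The error shows up while building:

import tvm
from tvm import relay

net = FP_VisionTransformer(data_shape=(1, 3, 224, 224))
mod = tvm.IRModule.from_expr(net)

# relay's operator fusion is what produces the fused conv2d + add
# kernel named in the error message
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="cuda")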

def FP_VisionTransformer(data_shape,
                        dtype='float32',
                        patch_size=16,
                        num_patches=196,
                        in_chans=3,
                        num_classes=1000,
                        embed_dim=192,
                        depth=12,
                        num_heads=3,
                        mlp_ratio=4):
    data = relay.var('data', shape=data_shape, dtype=dtype)

    # patch embedding
    print('patch embedding')
    proj = layers.conv2d(data=data,
                            name='embed_conv',
                            add_bias=True,
                            input_channels=in_chans,
                            output_channels=embed_dim,
                            kernel_size=(patch_size, patch_size),
                            strides=(patch_size, patch_size),
                            padding=(0, 0),
                            data_layout='NCHW',
                            kernel_layout='OIHW')
    .... # skip
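Just to spell out the shapes I expect from the patch embedding above (the 224x224 input resolution is my assumption; it is what makes num_patches equal 196):

# 16x16 conv with stride 16 cuts the image into non-overlapping patches
H = W = 224                        # assumed input resolution
grid = H // 16                     # -> 14x14 patch grid
assert grid * grid == 196          # matches num_patches
# NCHW conv output: (1, 192, 14, 14), later reshaped to (1, 196, 192) tokens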


def conv2d(...):  # parameter list skipped
    if kernel_layout == "OIHW":
        kernel_shape = (output_channels, input_channels, kernel_size[0], kernel_size[1])
    elif kernel_layout == "HWIO":
        kernel_shape = (kernel_size[0], kernel_size[1], input_channels, output_channels)
    elif kernel_layout == "HWOI":
        kernel_shape = (kernel_size[0], kernel_size[1], output_channels, input_channels)
    elif kernel_layout == "OHWI":
        kernel_shape = (output_channels, kernel_size[0], kernel_size[1], input_channels)
    else:
        raise RuntimeError("Unsupported kernel layout {}".format(kernel_layout))

    if weight is None:
        weight = relay.var(name + "_weight", shape=kernel_shape, dtype='float32')
    conv2d_output = relay.nn.conv2d(data, weight,
                                    strides=strides,
                                    padding=padding,
                                    dilation=dilation,
                                    groups=1,
                                    channels=output_channels,
                                    kernel_size=kernel_size,
                                    data_layout=data_layout,
                                    kernel_layout=kernel_layout,
                                    out_layout=data_layout,
                                    out_dtype=out_dtype)
    if add_bias:
        if data_layout == 'NCHW':
            bias_shape = (1, output_channels, 1, 1)
        elif data_layout == 'NHWC':
            bias_shape = (1, 1, 1, output_channels)
        elif data_layout == 'HWCN':
            bias_shape = (1, 1, output_channels, 1)
        elif data_layout == 'HWNC':
            bias_shape = (1, 1, 1, output_channels)
        else:
            raise RuntimeError("Unsupported conv2d layout {}".format(data_layout))
        bias = relay.var(name + "_bias", shape=bias_shape, dtype="float32")
        add = relay.add(conv2d_output, bias)
        return add
    else:
        return conv2d_output
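If it helps, this is how I would isolate just the patch-embedding layer with the helper above (input shape and "cuda" target are again placeholders, and I call the helper directly instead of through the layers module):

import tvm
from tvm import relay

data = relay.var('data', shape=(1, 3, 224, 224), dtype='float32')
out = conv2d(data=data,
             name='embed_conv',
             add_bias=True,
             input_channels=3,
             output_channels=192,
             kernel_size=(16, 16),
             strides=(16, 16),
             padding=(0, 0),
             data_layout='NCHW',
             kernel_layout='OIHW')

# only the conv2d + bias add, i.e. the op pair named in the fused kernel
mod = tvm.IRModule.from_expr(out)
with tvm.transform.PassContext(opt_level=3):
    relay.build(mod, target="cuda")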