UT runtime error when using AutoTVM

The test code I used is below.

import os
import numpy as np

import tvm
from tvm import autotvm
from tvm.relay import testing
from tvm.autotvm.tuner import XGBTuner

target = "llvm"

batch_size = 1
dtype = "float32"
log_file = "log.log"

tuning_option = {
    "log_filename": log_file,
    "tuner": "random",
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(
            number=1, repeat=10, min_repeat_ms=0, enable_cpu_cache_flush=True
        ),
    ),
}

def tune_kernels(tasks, measure_option, tuner="gridsearch", early_stopping=None, log_filename="tuning.log"):
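    # tune each extracted task with XGBTuner (note: the `tuner` argument is not used here)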
    for i, task in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        tuner_obj = XGBTuner(task, loss_type="rank")
        # do tuning
        n_trial = len(task.config_space)
        tuner_obj.tune(
            n_trial=n_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(n_trial, prefix=prefix),
                autotvm.callback.log_to_file(log_filename),
            ],
        )

def tune_and_evaluate(tuning_opt):
    # extract workloads from relay program
    print("Extract tasks...")
    data_shape = (1, 3, 224, 224)
    weight_shape = (16, 3, 3, 3)

    data = tvm.relay.var("data", tvm.relay.TensorType(data_shape, "float32"))
    weight = tvm.relay.var("weight", tvm.relay.TensorType(weight_shape, "float32"))
    conv = tvm.relay.nn.conv2d(
        data,
        weight,
        padding=(1, 1),
        kernel_size=(3, 3),
        data_layout="NCHW",
        kernel_layout="OIHW",
        out_dtype="float32",
    )

    # func = tvm.relay.Function([data, weight], conv)
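    # create_workload wraps the expression in an IRModule and initializes random parameters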
    mod, params = testing.create_workload(conv)
    tasks = autotvm.task.extract_from_program(
        mod, target=target, params=params
    )

    # run tuning tasks
    tune_kernels(tasks, **tuning_opt)


tune_and_evaluate(tuning_option)

but I got runtime errors like the ones below:

WARNING:autotvm:Too many errors happen in the tuning. Switching to debug mode.
WARNING:root:Could not find any valid schedule for task Task(func_name=conv2d_NCHWc.x86, args=(('TENSOR', (1, 3, 224, 224), 'float32'), ('TENSOR', (16, 3, 3, 3), 'float32'), (1, 1), (1, 1, 1, 1), (1, 1), 'NCHW', 'NCHW', 'float32'), kwargs={}, workload=('conv2d_NCHWc.x86', ('TENSOR', (1, 3, 224, 224), 'float32'), ('TENSOR', (16, 3, 3, 3), 'float32'), (1, 1), (1, 1, 1, 1), (1, 1), 'NCHW', 'NCHW', 'float32')). A file containing the errors has been written to /var/folders/cp/j9j88nhd5p18tk8p5pbs3sbh0000gp/T/tvm_tuning_errors_w9tvzokt.log.
DEBUG:autotvm:No: 153	GFLOPS: 0.00/0.00	result: MeasureResult(costs=(RuntimeError('Traceback (most recent call last):\n                     
Traceback (most recent call last):
  [bt] (8) 9   _ctypes.cpython-38-darwin.so        0x0000000102d90590 _ctypes_callproc + 1200
  [bt] (7) 8   libffi.8.dylib                      0x0000000102d5d74c ffi_call_int + 1208
  [bt] (6) 7   libffi.8.dylib                      0x0000000102d6004c ffi_call_SYSV + 76
  [bt] (5) 6   libtvm.dylib                        0x000000011a9e24f0 TVMModGetFunction + 188
  [bt] (4) 5   libtvm.dylib                        0x000000011a9fe544 tvm::runtime::ModuleNode::GetFunction(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, bool) + 116
  [bt] (3) 4   libtvm.dylib                        0x000000011aa61350 tvm::runtime::RPCModuleNode::GetFunction(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, tvm::runtime::ObjectPtr<tvm::runtime::Object> const&) + 496
  [bt] (2) 3   libtvm.dylib                        0x000000011aa59a8c tvm::runtime::RPCClientSession::GetFunction(std::__1::basic_string<char, 

Any suggestions for this error?