[Unity] [Frontend] Failed to load Range op

I used the Relax ONNX frontend to load an ONNX model that contains only a single Range op. The model is simple, and I will upload it later.
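
For reference, here is a minimal sketch of how such a model can be constructed and imported; the tensor names, opset version, and constant values below are illustrative placeholders, not the exact ones from my exported model:

    import onnx
    from onnx import TensorProto, helper
    from tvm.relax.frontend.onnx import from_onnx

    # Range(start=0.0, limit=<scalar float32 graph input>, delta=1.0)
    start = helper.make_tensor("start", TensorProto.FLOAT, [], [0.0])
    delta = helper.make_tensor("delta", TensorProto.FLOAT, [], [1.0])
    limit = helper.make_tensor_value_info("limit", TensorProto.FLOAT, [])
    out = helper.make_tensor_value_info("out", TensorProto.FLOAT, ["n"])

    node = helper.make_node("Range", ["start", "limit", "delta"], ["out"])
    graph = helper.make_graph([node], "range_only", [limit], [out], initializer=[start, delta])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 17)])
    onnx.checker.check_model(model)

    relax_mod = from_onnx(model)  # import with the Relax ONNX frontend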

After loading the ONNX model, I got the following IRModule, but it fails to build with Relax:

    @I.ir_module
    class Module:
        @T.prim_func(private=True)
        def arange(A: T.Buffer((), "float32"), T_arange: T.Buffer((T.Cast("int64", T.ceil(A[()])),), "float32")):
            T.func_attr({"tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)})
            # with T.block("root"):
            for ax0_fused_1 in T.thread_binding(T.int64(256), thread="blockIdx.x"):
                for ax0_fused_2 in T.thread_binding(T.int64(2048), thread="threadIdx.x"):
                    for ax0_fused_0 in range((T.Cast("int64", T.ceil(A[()])) + T.int64(524287)) // T.int64(524288)):
                        with T.block("T_arange"):
                            v_ax0 = T.axis.spatial(T.Cast("int64", T.ceil(A[()])), ax0_fused_0 * T.int64(524288) + ax0_fused_1 * T.int64(2048) + ax0_fused_2)
                            T.where((ax0_fused_0 * T.int64(256) + ax0_fused_1) * T.int64(2048) + ax0_fused_2 < T.Cast("int64", T.ceil(A[()])))
                            T.reads()
                            T.writes(T_arange[v_ax0])
                            T_arange[v_ax0] = T.Cast("float32", v_ax0)

        @R.function
        def main(input__encoder_make_pad_mask_Cast_output_0: R.Tensor((), dtype="float32")) -> R.Tensor((T.Cast("int64", T.ceil(A[()])),), dtype="float32"):
            R.func_attr({"num_input": 1})
            cls = Module
            with R.dataflow():
                lv = R.call_tir(cls.arange, (input__encoder_make_pad_mask_Cast_output_0,), out_sinfo=R.Tensor((T.Cast("int64", T.ceil(A[()])),), dtype="float32"))
                gv: R.Tensor((T.Cast("int64", T.ceil(A[()])),), dtype="float32") = lv
                R.output(gv)
            return gv

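For context, the build step looks roughly like this. This is a sketch of my setup; the dlight scheduling pass is an assumption I include only because the dumped module above already carries `tir.is_scheduled` and thread bindings:

    import tvm
    from tvm import dlight as dl
    from tvm import relax

    target = tvm.target.Target("cuda")
    with target:
        # legalize high-level Relax ops into TIR and apply a default GPU schedule
        relax_mod = relax.transform.LegalizeOps()(relax_mod)
        relax_mod = dl.ApplyDefaultSchedule(dl.gpu.Fallback())(relax_mod)

    # this is the call that fails (test.py, line 182 in the traceback below)
    ex = relax.build(relax_mod, target=target)
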
The compile error is as follows:

      File "/home/test.py", line 182, in xxx
    ex = relax.build(relax_mod, target=target)
  File "/path/tvm_unity/python/tvm/relax/vm_build.py", line 335, in build
    mod = pipeline(mod)
  File "/path/tvm_unity/python/tvm/ir/transform.py", line 238, in __call__
    return _ffi_transform_api.RunPass(self, mod)
  File "/path/tvm_unity/python/tvm/_ffi/_ctypes/packed_func.py", line 239, in __call__
    raise_last_ffi_error()
  File "/path/tvm_unity/python/tvm/_ffi/base.py", line 481, in raise_last_ffi_error
    raise py_err
  File "Objects/call.c", line 255, in PyVectorcall_Call
  File "Python/ceval.c", line 5896, in call_function
  File "/path/tvm_unity/python/tvm/relax/pipeline.py", line 99, in _pipeline
    mod = seq(mod)
  File "/path/tvm_unity/python/tvm/ir/transform.py", line 238, in __call__
    return _ffi_transform_api.RunPass(self, mod)
  File "/path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc", line 804, in operator()
    [=](IRModule mod, PassContext pc) { return VMShapeLowerMutator::Lower(mod, emit_err_ctx); };
  File "/path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc", line 209, in tvm::relax::VMShapeLowerMutator::Lower(tvm::IRModule, bool)
    Function updated_func = mutator.Rewrite(kv.first, GetRef<Function>(func));
  File "/path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc", line 268, in tvm::relax::VMShapeLowerMutator::Rewrite(tvm::GlobalVar, tvm::relax::Function)
    auto body_seq = Downcast<SeqExpr>(this->VisitWithNewScope(func->body, func->params));
  File "/path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc", line 397, in tvm::relax::VMShapeLowerMutator::VisitExpr_(tvm::relax::ShapeExprNode const*)
    auto [code, value_or_index] = MakeSymbolicShapeArg(expr);
  File "/path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc", line 360, in tvm::relax::VMShapeLowerMutator::MakeSymbolicShapeArg(tvm::PrimExpr const&)
    ICHECK(slot->value_computed) << "PrimExpr " << expr << " has not been computed";
tvm.error.InternalError: Traceback (most recent call last):
  4: operator()
        at /path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc:804
  3: tvm::relax::VMShapeLowerMutator::Lower(tvm::IRModule, bool)
        at /path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc:209
  2: tvm::relax::VMShapeLowerMutator::Rewrite(tvm::GlobalVar, tvm::relax::Function)
        at /path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc:268
  1: tvm::relax::VMShapeLowerMutator::VisitExpr_(tvm::relax::ShapeExprNode const*)
        at /path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc:397
  0: tvm::relax::VMShapeLowerMutator::MakeSymbolicShapeArg(tvm::PrimExpr const&)
        at /path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc:360
  File "/path/tvm_unity/src/relax/backend/vm/vm_shape_lower.cc", line 360
InternalError: Check failed: (slot->value_computed) is false: PrimExpr T.int64(4) * T.Cast("int64", T.ceil(A[()])) has not been computed

Does anybody have any ideas? I would be grateful for any help.