Subject: Why is inference so slow when compiling ONNX with TVM and using automatic optimization?
Background: I am using TVM version 0.20.0 to compile an ONNX model into a .so library and optimize it with relax.transform. The compiled .so library runs inference very slowly, taking more than 20 seconds.
(For comparison: ONNX Runtime single-threaded inference takes about 300 ms, and OpenVINO single-threaded inference takes about 50 ms on the same model.)
import os
import onnx
import numpy as np
import tvm
from tvm import relax
from tvm.contrib import utils
import time
from tvm import relax
from tvm.relax import transform
from tvm.ir import Op
from tvm.ir import transform
def _extract_input_shapes(onnx_model):
    """Return {input_name: static_shape_tuple} for every graph input.

    Unknown/dynamic dimensions (dim_value <= 0) are pinned to 1, matching the
    original script's behavior.
    """
    input_shape = {}
    for input_node in onnx_model.graph.input:
        shape = [
            dim.dim_value if dim.dim_value > 0 else 1
            for dim in input_node.type.tensor_type.shape.dim
        ]
        input_shape[input_node.name] = tuple(shape)
        print(f"输入: {input_node.name}, 形状: {shape}")
    return input_shape


def _benchmark_vm(lib, target, input_shape, warmup=3, repeat=10):
    """Run the Relax VM on random input and return the mean latency in ms.

    Args:
        lib: the runtime module produced by relax.build.
        target: the tvm.target.Target used for compilation (selects the device).
        input_shape: {name: shape} detected from the model; the first entry is
            used as the benchmark input shape.
        warmup: untimed runs to absorb one-time costs (lazy init, allocation).
        repeat: number of timed runs averaged into the result.

    Raises:
        Whatever the VM raises on execution failure (handled by the caller).
    """
    dev = tvm.device(str(target), 0)
    # Prefer the shape detected from the model; fall back to the shape the
    # original script hard-coded.
    shape = next(iter(input_shape.values()), (1, 3, 544, 960))
    dtype = "float32"
    input_data = tvm.nd.array(np.random.rand(*shape).astype(dtype), device=dev)
    vm = relax.VirtualMachine(lib, dev)
    # BUG FIX: the original did exactly one warm-up and ONE timed run, so the
    # reported std was always 0.00 and the single measurement included
    # first-run initialization cost.
    for _ in range(warmup):
        vm["main"](input_data)
    timing_results = []
    for i in range(repeat):
        start_time = time.perf_counter()
        vm["main"](input_data)
        end_time = time.perf_counter()
        elapsed_ms = (end_time - start_time) * 1000
        timing_results.append(elapsed_ms)
        print(f"执行 {i+1}: {elapsed_ms:.2f} ms")
    avg_time = float(np.mean(timing_results))
    std_time = float(np.std(timing_results))
    print(f"📊 平均执行时间: {avg_time:.2f} ms (±{std_time:.2f} ms)")
    return avg_time


def fix_pass_ordering_and_evaluation():
    """Compile custom.onnx to a TVM shared library and benchmark it.

    Pipeline: load/validate ONNX -> import to Relax IR -> run the
    static_shape_tuning pipeline -> build -> export .so -> benchmark via the
    Relax VirtualMachine.

    Returns:
        (lib, so_path, avg_ms) on full success;
        (lib, so_path, None) when compilation succeeded but the benchmark
        raised; (None, None, None) when any earlier step failed.
    """
    model_input = "custom.onnx"
    output_dir = "./tvm_output_fixed"
    os.makedirs(output_dir, exist_ok=True)
    try:
        # 1. Load and validate the ONNX model.
        print("🔍 加载ONNX模型...")
        onnx_model = onnx.load(model_input)
        onnx.checker.check_model(onnx_model)

        # 2. Convert to Relax IR.
        from tvm.relax.frontend import onnx as relax_onnx

        input_shape = _extract_input_shapes(onnx_model)
        print("🔄 转换ONNX到Relax IR...")
        # BUG FIX: input_shape was computed but never passed to the importer,
        # so any dynamic dims stayed symbolic and the shape analysis was dead
        # code; feed it through shape_dict so the import is fully static.
        mod = relax_onnx.from_onnx(onnx_model, shape_dict=input_shape)
        print("✅ ONNX到Relax转换成功")

        # 3. Build the target.
        # NOTE(review): -mattr lists +avx2 twice, and these flags assume a
        # Skylake-AVX512 host with 96 cores — confirm they match the actual
        # deployment machine (a simpler "llvm -mcpu=native" is often safer).
        target = tvm.target.Target(
            "llvm -mcpu=skylake-avx512 -mattr=+avx2,+fma,+avx512f,+avx2 -num-cores 96"
        )
        print(f"✅ Target创建成功: {target}")

        # 4. Compile with an explicit pass pipeline.
        print("🔧 编译模型...")
        print("🛡️ 使用备用编译策略...")
        with tvm.transform.PassContext(
            opt_level=3,
            config={
                "tir.enable_debug": False,
                "tir.disable_vectorize": False,
                "tir.merge_static_smem": True,
                "tir.enable_buffer_level_predication": True,
            },
        ):
            # NOTE(review): total_trials=0 skips auto-tuning entirely, so all
            # kernels fall back to untuned default schedules — presumably the
            # dominant cause of the 20s+ latency reported in the question.
            # Raise total_trials to let MetaSchedule actually tune.
            mod = relax.get_pipeline(
                "static_shape_tuning",
                target=target,
                total_trials=0,
                cpu_weight_prepack=True,
            )(mod)
            mod = relax.transform.StaticPlanBlockMemory()(mod)
            print(f"optmize after: {mod.script()}")
            lib = relax.build(mod, target=target)
        print("✅ 备用编译成功")

        # 5. Export the shared library next to the script.
        model_name = os.path.splitext(os.path.basename(model_input))[0]
        so_path = os.path.join(output_dir, f"{model_name}.so")
        lib.export_library(so_path)
        print(f"✅ 已导出共享库: {so_path}")

        # 6. Benchmark; a benchmark failure must not discard the build result.
        print("📊 开始性能测试...")
        try:
            avg_time = _benchmark_vm(lib, target, input_shape)
            return lib, so_path, avg_time
        except Exception as e:
            print(f"❌ 性能测试失败: {e}")
            return lib, so_path, None
    except Exception as e:
        print(f"💥 整体流程失败: {e}")
        import traceback
        traceback.print_exc()
        return None, None, None
def main_fixed():
    """Run the fixed compile-and-benchmark pipeline and report the outcome.

    Returns:
        The (lib, so_path, performance) triple produced by
        fix_pass_ordering_and_evaluation(), forwarded unchanged.
    """
    banner = "=" * 60
    print(banner)
    print("TVM ONNX模型编译 - 修复版")
    print(banner)

    lib, so_path, performance = fix_pass_ordering_and_evaluation()

    # Guard clause: report failure first, success details otherwise.
    if lib is None:
        print("❌ 编译失败,请检查错误信息")
    else:
        print(f"🎉 编译成功! 输出文件: {so_path}")
        if performance is None:
            print("⚠️ 性能测试未完成,但模型已成功编译")
        else:
            print(f"🚀 性能结果: {performance:.2f} ms")
    return lib, so_path, performance
# Run the fixed pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    lib, so_path, performance = main_fixed()
备用编译成功
已导出共享库: ./tvm_output_fixed/custom.so
开始性能测试…
执行 1: 26450.62 ms
平均执行时间: 26450.62 ms (±0.00 ms)
编译成功! 输出文件: ./tvm_output_fixed/custom.so
性能结果: 26450.62 ms