How to use dynamic input width for conv1d?

Hey! While compiling a single-layer conv1d model, I am unable to lower the IRModule and get the following error:

TVMError: Failed to vectorize loop with extent (any_dim - 2)

Can someone suggest how to fix this error or how to add the feature to the library? I have already added a few features to the library and can potentially help out with this.

I am using commit 73887156321fcee1700ef8661f052d8d38022a4d of the repo

Here’s the minimal reproducible code I am using for testing:

import tvm
import numpy as np
from tvm import relay

def prepare_net(input_channels, output_channels, dtype):
    """Build a single-layer conv1d Relay function whose input width is dynamic.

    The width dimension is relay.Any(), so the resulting function should
    accept inputs of variable length along the last axis.
    """
    # Shape is (batch=1, channels, dynamic width).
    dyn_shape = (1, input_channels, relay.Any())
    data = relay.var("data", shape=dyn_shape, dtype=dtype)
    conv = relay.nn.conv1d(
        data,
        relay.var("c1.w"),  # weight shape left for InferType to resolve
        channels=output_channels,
        kernel_size=(3,),
        strides=(1,),
    )
    # Close over all free vars (data and the conv weight) as parameters.
    return relay.Function(relay.analysis.free_vars(conv), conv)

def prepare_mod_params(net):
    """Wrap `net` in an IRModule, run type inference, and create random
    initial values for every parameter except the "data" input.

    Returns the (module, params-dict) pair expected by relay.vm.compile.
    """
    mod = relay.transform.InferType()(tvm.IRModule.from_expr(net))
    params = {}
    for var in mod["main"].params:
        name = var.name_hint
        # "data" is the runtime input, not a trainable parameter.
        if name == "data":
            continue
        var_type = var.checked_type
        weights = np.random.randn(*var_type.concrete_shape).astype(var_type.dtype)
        params[name] = tvm.nd.array(weights, device=tvm.cpu(0))
    return mod, params

def prepare_vm(mod, params):
    """Compile `mod` with the Relay VM compiler for LLVM/CPU and return a
    runnable VirtualMachine.

    The executable is round-tripped through save()/load_exec() on purpose,
    to exercise the serialization path as well.
    """
    target = tvm.target.Target("llvm")
    with tvm.transform.PassContext(opt_level=3):
        # NOTE: renamed from `exec`, which shadowed the Python builtin.
        executable = relay.vm.compile(mod, target=target, params=params)
        code, lib = executable.save()
        des_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
        return tvm.runtime.vm.VirtualMachine(des_exec, tvm.cpu())

def test():
    """End-to-end smoke test: build, compile, and run the dynamic conv1d model."""
    dtype = "float32"
    input_channels = 32
    output_channels = 1

    net = prepare_net(input_channels, output_channels, dtype)
    print("net prepared")
    mod, params = prepare_mod_params(net)
    print("mod_params prepared")
    vm = prepare_vm(mod, params)
    print("vm prepared")

    # Run with a concrete width (32) even though the model was built with Any().
    sample = np.random.uniform(size=[1, input_channels, 32]).astype(dtype)
    result = vm.run(sample)
    print(sample.shape, result.shape)
    print(result)

if __name__ == "__main__":
    test()

And here’s the full stack trace of the error:

(base) root@f318f8011a56:/home/workspace/sample/puretvm/dynlen_conv1d# python build_model.py 
net prepared
mod_params prepared
Traceback (most recent call last):
  File "build_model.py", line 62, in <module>
    test()
  File "build_model.py", line 51, in test
    vm = prepare_vm(mod, params)
  File "build_model.py", line 37, in prepare_vm
    exec = relay.vm.compile(mod, target=target, params=params)
  File "/opt/tvm/python/tvm/relay/backend/vm.py", line 72, in compile
    compiler.lower(mod, target)
  File "/opt/tvm/python/tvm/relay/backend/vm.py", line 143, in lower
    self._lower(mod, target, target_host)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 323, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 257, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./packed_func.pxi", line 246, in tvm._ffi._cy3.core.FuncCall3
  File "tvm/_ffi/_cython/./base.pxi", line 163, in tvm._ffi._cy3.core.CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
  33: TVMFuncCall
  32: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), tvm::relay::vm::VMCompiler::GetFunction(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, tvm::runtime::ObjectPtr<tvm::runtime::Object> const&)::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
  31: tvm::relay::vm::VMCompiler::Lower(tvm::IRModule, tvm::runtime::Map<tvm::Integer, tvm::Target, void, void> const&, tvm::Target const&)
  30: tvm::relay::vm::VMFunctionCompiler::Compile(tvm::GlobalVar const&, tvm::relay::Function const&)
  29: tvm::relay::ExprFunctor<void (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)
  28: tvm::relay::vm::VMFunctionCompiler::VisitExpr_(tvm::relay::LetNode const*)
  27: tvm::relay::ExprFunctor<void (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)
  26: tvm::relay::vm::VMFunctionCompiler::VisitExpr_(tvm::relay::CallNode const*)
  25: tvm::relay::OpMatch<void>::operator()(tvm::relay::Call const&)
  24: std::_Function_handler<void (tvm::runtime::Array<tvm::RelayExpr, void> const&, tvm::Attrs const&, tvm::runtime::Array<tvm::Type, void> const&), tvm::relay::vm::VMFunctionCompiler::VisitExpr_(tvm::relay::CallNode const*)::{lambda(tvm::runtime::Array<tvm::RelayExpr, void> const&, tvm::Attrs const&, tvm::runtime::Array<tvm::Type, void> const&)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::Array<tvm::RelayExpr, void> const&, tvm::Attrs const&, tvm::runtime::Array<tvm::Type, void> const&)
  23: tvm::relay::vm::VMFunctionCompiler::EmitInvokeTVMOp(tvm::relay::Function const&, tvm::RelayExpr const&, tvm::RelayExpr const&)
  22: tvm::relay::CompileEngineImpl::Lower(tvm::relay::tec::CCacheKey const&, std::function<tvm::runtime::String (tvm::runtime::String)>)
  21: tvm::relay::CompileEngineImpl::LowerInternal(tvm::relay::tec::CCacheKey const&, std::function<tvm::runtime::String (tvm::runtime::String)>)
  20: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::te::Tensor, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
  19: tvm::LowerSchedule(tvm::te::Schedule, tvm::runtime::Array<tvm::runtime::ObjectRef, void> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::unordered_map<tvm::te::Tensor, tvm::tir::Buffer, std::hash<tvm::te::Tensor>, std::equal_to<tvm::te::Tensor>, std::allocator<std::pair<tvm::te::Tensor const, tvm::tir::Buffer> > > const&, bool)
  18: tvm::LowerWithPassList(tvm::IRModule, tvm::runtime::Array<tvm::transform::Pass, void>)
  17: tvm::transform::Pass::operator()(tvm::IRModule) const
  16: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  15: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  14: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  13: tvm::tir::transform::PrimFuncPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  12: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), tvm::runtime::TypedPackedFunc<tvm::tir::PrimFunc (tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)>::AssignTypedLambda<tvm::tir::transform::VectorizeLoop(bool)::{lambda(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)#1}>(tvm::tir::transform::VectorizeLoop(bool)::{lambda(tvm::tir::PrimFunc, tvm::IRModule, tvm::transform::PassContext)#1})::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
  11: _ZZN3tvm3tir11StmtFunctorIFNS0_4StmtERKS2_EE10InitVTableEvENUlRKNS_7r
  10: tvm::tir::StmtMutator::VisitStmt_(tvm::tir::AttrStmtNode const*)
  9: tvm::tir::StmtMutator::VisitStmt(tvm::tir::Stmt const&)
  8: _ZZN3tvm3tir11StmtFunctorIFNS0_4StmtERKS2_EE10InitVTableEvENUlRKNS_7r
  7: tvm::tir::StmtMutator::VisitStmt_(tvm::tir::AllocateNode const*)
  6: tvm::tir::StmtMutator::VisitStmt(tvm::tir::Stmt const&)
  5: _ZZN3tvm3tir11StmtFunctorIFNS0_4StmtERKS2_EE10InitVTableEvENUlRKNS_7runt
  4: tvm::tir::StmtMutator::VisitStmt_(tvm::tir::SeqStmtNode const*)
  3: void tvm::runtime::Array<tvm::tir::Stmt, void>::MutateByApply<tvm::tir::StmtMutator::Internal::Mutate(tvm::tir::StmtMutator*, tvm::runtime::Array<tvm::tir::Stmt, void> const&)::{lambda(tvm::tir::Stmt const&)#1}>(tvm::tir::StmtMutator::Internal::Mutate(tvm::tir::StmtMutator*, tvm::runtime::Array<tvm::tir::Stmt, void> const&)::{lambda(tvm::tir::Stmt const&)#1})
  2: tvm::tir::StmtMutator::VisitStmt(tvm::tir::Stmt const&)
  1: _ZZN3tvm3tir11StmtFunctorIFNS0_4StmtERKS2_EE10InitVTableEvENUlRKNS_7r
  0: tvm::tir::LoopVectorizer::VisitStmt_(tvm::tir::ForNode const*)
  File "../src/tir/transforms/vectorize_loop.cc", line 538
TVMError: Failed to vectorize loop with extent (any_dim - 2)