[TensorRT] <class 'tvm.relay.op.op_attrs.PadAttrs'> has no attribute pad_value

I am trying to use the Relay TensorRT integration to accelerate PaddlePaddle inference, following the Relay TensorRT integration tutorials. When I run the program, the following error occurs:

File "tvm_trt.py", line 38, in <module>
    mod, config = partition_for_tensorrt(mod, params)
  File "/home/ssd2/heliqi/my_project/tvm/python/tvm/relay/op/contrib/tensorrt.py", line 158, in partition_for_tensorrt
    mod = seq(mod)
  File "/home/ssd2/heliqi/my_project/tvm/python/tvm/ir/transform.py", line 161, in __call__
    return _ffi_transform_api.RunPass(self, mod)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 323, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 257, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./packed_func.pxi", line 246, in tvm._ffi._cy3.core.FuncCall3
  File "tvm/_ffi/_cython/./base.pxi", line 163, in tvm._ffi._cy3.core.CALL
AttributeError: Traceback (most recent call last):
  23: TVMFuncCall
  22: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), tvm::runtime::TypedPackedFunc<tvm::IRModule (tvm::transform::Pass, tvm::IRModule)>::AssignTypedLambda<tvm::transform::{lambda(tvm::transform::Pass, tvm::IRModule)#7}>(tvm::transform::{lambda(tvm::transform::Pass, tvm::IRModule)#7}, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
  21: tvm::transform::Pass::operator()(tvm::IRModule) const
  20: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  19: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  18: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  17: tvm::transform::SequentialNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  16: tvm::transform::Pass::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  15: tvm::relay::transform::FunctionPassNode::operator()(tvm::IRModule, tvm::transform::PassContext const&) const
  14: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), tvm::runtime::TypedPackedFunc<tvm::relay::Function (tvm::relay::Function, tvm::IRModule, tvm::transform::PassContext)>::AssignTypedLambda<tvm::relay::transform::AnnotateTarget(tvm::runtime::Array<tvm::runtime::String, void> const&, bool)::{lambda(tvm::relay::Function, tvm::IRModule, tvm::transform::PassContext)#1}>(tvm::relay::transform::AnnotateTarget(tvm::runtime::Array<tvm::runtime::String, void> const&, bool)::{lambda(tvm::relay::Function, tvm::IRModule, tvm::transform::PassContext)#1})::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
  13: tvm::relay::annotate_target::AnnotateTarget(tvm::RelayExpr const&, tvm::runtime::Array<tvm::runtime::String, void> const&, bool)
  12: tvm::relay::PostOrderRewrite(tvm::RelayExpr const&, tvm::relay::ExprRewriter*)
  11: tvm::relay::MixedModeMutator::VisitExpr(tvm::RelayExpr const&)
  10: tvm::relay::MixedModeMutator::VisitLeaf(tvm::RelayExpr const&)
  9: tvm::relay::PostOrderRewriter::DispatchVisitExpr(tvm::RelayExpr const&)
  8: _ZZN3tvm5relay11ExprFunc
  7: tvm::relay::ExprMutator::VisitExpr_(tvm::relay::FunctionNode const*)
  6: tvm::relay::MixedModeMutator::VisitExpr(tvm::RelayExpr const&)
  5: tvm::relay::MixedModeMutator::VisitLeaf(tvm::RelayExpr const&)
  4: tvm::relay::PostOrderRewriter::DispatchVisitExpr(tvm::RelayExpr const&)
  3: tvm::relay::ExprRewriter::Rewrite(tvm::RelayExpr const&, tvm::RelayExpr const&)
  2: _ZZN3tvm5relay12ExprRewri
  1: tvm::relay::annotate_target::AnnotateTargetRewriter::Rewrite_(tvm::relay::CallNode const*, tvm::RelayExpr const&)
  0: std::_Function_handler<void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*), TVMFuncCreateFromCFunc::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#2}>::_M_invoke(std::_Any_data const&, tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&)
  File "/home/ssd2/heliqi/my_project/tvm/python/tvm/runtime/object.py", line 65, in __getattr__
    return _ffi_node_api.NodeGetAttr(self, name)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 323, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 257, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./packed_func.pxi", line 246, in tvm._ffi._cy3.core.FuncCall3
  File "tvm/_ffi/_cython/./base.pxi", line 163, in tvm._ffi._cy3.core.CALL
  3: TVMFuncCall
  2: _ZNSt17_Function_handlerI
  1: tvm::NodeGetAttr(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
  0: tvm::ReflectionVTable::GetAttr(tvm::runtime::Object*, tvm::runtime::String const&) const
  File "/home/ssd2/heliqi/my_project/tvm/src/node/reflection.cc", line 109
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/home/ssd2/heliqi/my_project/tvm/python/tvm/relay/op/contrib/tensorrt.py", line 242, in _func_wrapper
    return checker(expr)
  File "/home/ssd2/heliqi/my_project/tvm/python/tvm/relay/op/contrib/tensorrt.py", line 733, in pad_annotate_fn
    if float(attrs.pad_value) != 0.0:
  File "/home/ssd2/heliqi/my_project/tvm/python/tvm/runtime/object.py", line 67, in __getattr__
    raise AttributeError("%s has no attribute %s" % (str(type(self)), name))
AttributeError: relay.attrs.PadAttrs object has no attribute pad_value
During handling of the above exception, another exception occurred:

AttributeError: <class 'tvm.relay.op.op_attrs.PadAttrs'> has no attribute pad_value

I looked at the source code (src/relay/op/nn/pad.cc) and found that the attrs of the pad op have no pad_value attribute:

// Handler to create a call to the padding op used by front-end FFI.
// NOTE: pad_value is intentionally passed as the op's *second call argument*
// (so it can be a non-constant tensor) and is NOT stored on PadAttrs -- only
// pad_width and pad_mode live on the attrs node. Any code reading
// attrs.pad_value (e.g. the TensorRT annotator) must read args[1] instead.
Expr MakePad(Expr data, Array<Array<Integer>> pad_width, Expr pad_value, String pad_mode) {
  auto attrs = make_object<PadAttrs>();
  attrs->pad_width = std::move(pad_width);  // per-axis (before, after) padding amounts
  attrs->pad_mode = std::move(pad_mode);
  static const Op& op = Op::Get("nn.pad");
  // pad_value becomes input #1 of the call; Attrs carries only width/mode.
  return Call(op, {data, pad_value}, Attrs(attrs), {});
}

// Expose MakePad to the Python frontend as relay.op.nn._make.pad.
TVM_REGISTER_GLOBAL("relay.op.nn._make.pad").set_body_typed(MakePad);

RELAY_REGISTER_OP("nn.pad")
    .describe(R"code(Pad for n-D tensor.

)code" TVM_ADD_FILELINE)
    .set_attrs_type<PadAttrs>()
    // Two inputs: the data tensor and the pad value -- confirming that the
    // pad value is an op input, not a PadAttrs field.
    .set_num_inputs(2)
    .add_argument("data", "Tensor", "The input tensor.")
    .add_argument("pad_val", "Tensor", "The value to fill the padded area with")
    .set_support_level(2)
    .add_type_rel("Pad", PadRel)
    .set_attr<FInferCorrectLayout>("FInferCorrectLayout", PadInferCorrectLayout)
    .set_attr<TOpPattern>("TOpPattern", kInjective)
    .set_attr<FTVMCompute>("FTVMCompute", PadCompute);

Thanks for reporting this. This looks like a bug: `pad_value` should be read from `args[1]` (the op's second input) rather than from the attrs.

How can I fix this bug? Can I change the following code to solve it?

.add_argument("pad_value", "Tensor", "The value to fill the padded area with") // pad_val

No — you need to change the annotator to read `pad_value = args[1]`, and then check `isinstance(pad_value, Constant)` so that you can extract the numeric value of the pad value.

Thank you for your advice. Now I can export the .so file, but there is an error when loading the .so file with C++. I will give up on this scheme for the time being.

The following is the error message when loading the .so file:

terminate called after throwing an instance of 'tvm::runtime::InternalError' what(): [11:18:37] .../src/runtime/library_module.cc:116: Binary was created using metadata but a loader of that name is not registered. Available loaders are cuda, GraphExecutorFactory, GraphRuntimeFactory. Perhaps you need to recompile with this runtime enabled. Stack trace:

This modification should not be sufficient.

Conv2d does have a pad_value attribute on its attrs, but for 'Pad' the pad value is a call argument, not an attribute.

1 Like