The strided_slice
op does not work when there is an argwhere
op in the input pipeline, because the output shape of argwhere
is dynamic. Is there a better solution to fix this?
Error Information
Check failed: ObjectTypeChecker::Check(ptr) == false: Expect Array[IntImm] but get Array
Traceback (most recent call last):
File "test_forward.py", line 4733, in <module>
test_where_strided_slice((6), "int32")
File "test_forward.py", line 1846, in test_where_strided_slice
compare_tf_with_tvm(np_data, "in_data:0", "output:0", no_gpu=True, mode='vm')
File "test_forward.py", line 253, in compare_tf_with_tvm
cuda_layout=cuda_layout,
File "test_forward.py", line 133, in run_tvm_graph
graph_def, layout=layout, shape=shape_dict, outputs=out_names
File "/root/code/fork_tvm/tvm/python/tvm/relay/frontend/tensorflow.py", line 3633, in from_tensorflow
mod, params = g.from_tensorflow(graph, layout, shape, outputs)
File "/root/code/fork_tvm/tvm/python/tvm/relay/frontend/tensorflow.py", line 3039, in from_tensorflow
func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)
File "/root/code/fork_tvm/tvm/python/tvm/relay/frontend/tensorflow.py", line 3003, in _get_relay_func
self._backtrack_construct(node.name)
File "/root/code/fork_tvm/tvm/python/tvm/relay/frontend/tensorflow.py", line 3563, in _backtrack_construct
op = self._convert_operator(node.op, node.name, inputs, attr)
File "/root/code/fork_tvm/tvm/python/tvm/relay/frontend/tensorflow.py", line 3405, in _convert_operator
sym = convert_map[op_name](inputs, attrs, self._params, self._mod)
File "/root/code/fork_tvm/tvm/python/tvm/relay/frontend/tensorflow.py", line 1725, in _impl
out = _op.strided_slice(inputs[0], begin=begin, end=end, strides=stride)
File "/root/code/fork_tvm/tvm/python/tvm/relay/op/transform.py", line 898, in strided_slice
return _make.strided_slice(data, begin, end, strides, slice_mode)
File "/root/code/fork_tvm/tvm/python/tvm/_ffi/_ctypes/packed_func.py", line 237, in __call__
raise get_last_ffi_error()
tvm._ffi.base.TVMError: Traceback (most recent call last):
[bt] (4) /root/code/fork_tvm/tvm/build/libtvm.so(TVMFuncCall+0x61) [0x7ff37abac3d1]
[bt] (3) /root/code/fork_tvm/tvm/build/libtvm.so(void tvm::runtime::TypedPackedFunc<tvm::RelayExpr (tvm::RelayExpr, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::String)>::AssignTypedLambda<tvm::RelayExpr (*)(tvm::RelayExpr, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::String)>(tvm::RelayExpr (*)(tvm::RelayExpr, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::Array<tvm::Integer, void>, tvm::runtime::String))::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}::operator()(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*) const+0x22a) [0x7ff37a75833a]
[bt] (2) /root/code/fork_tvm/tvm/build/libtvm.so(tvm::runtime::TVMMovableArgValue_::operator tvm::runtime::Array<tvm::Integer, void><tvm::runtime::Array<tvm::Integer, void>, void>() const+0x6a) [0x7ff379f926ba]
[bt] (1) /root/code/fork_tvm/tvm/build/libtvm.so(tvm::runtime::Array<tvm::Integer, void> tvm::runtime::TVMPODValue_::AsObjectRef<tvm::runtime::Array<tvm::Integer, void> >() const+0x4ab) [0x7ff379f9259b]
[bt] (0) /root/code/fork_tvm/tvm/build/libtvm.so(dmlc::LogMessageFatal::~LogMessageFatal()+0x61) [0x7ff379e1d9a1]
File "/root/code/fork_tvm/tvm/include/tvm/runtime/packed_func.h", line 1405
TVMError:
---------------------------------------------------------------
An internal invariant was violated during the execution of TVM.
Please read TVM's error reporting guidelines.
More details can be found here: https://discuss.tvm.ai/t/error-reporting/7793.
---------------------------------------------------------------
Check failed: ObjectTypeChecker<TObjectRef>::Check(ptr) == false: Expect Array[IntImm] but get Array
Minimal code to reproduce
I added a test in test_forward.py
# tvm/tests/python/frontend/tensorflow/test_forward.py
#######################################################################
# StridedSlice
# ------------
def test_where_strided_slice(ip_shape, dtype):
    """Exercise strided_slice on a tensor whose shape is dynamic.

    Builds a TF graph where tf.where (argwhere) produces a dynamically
    shaped index tensor that feeds strided_slice, then compares the TF
    result against TVM's VM executor via compare_tf_with_tvm.
    """
    tf.reset_default_graph()
    with tf.Graph().as_default():
        # Placeholder name must match the "in_data:0" tensor name below.
        placeholder = tf.placeholder(dtype, ip_shape, name="in_data")
        table = tf.ones((10, 20))
        # Single-argument tf.where returns indices of nonzero entries,
        # so its output shape is only known at run time.
        nonzero_idx = tf.squeeze(tf.where(placeholder), axis=1)
        gathered = tf.reshape(tf.gather(table, nonzero_idx), shape=(-1, 2, 10))
        tf.strided_slice(
            gathered,
            begin=(1, 0, 0),
            end=(0, 1, 0),
            strides=(1, 1, 1),
            begin_mask=4,
            end_mask=5,
            new_axis_mask=0,
            shrink_axis_mask=2,
            ellipsis_mask=0,
            name="output",
        )
        feed = (np.random.uniform(size=ip_shape) * 10).astype(dtype)
        # Zero out a prefix so tf.where yields a nontrivial index set.
        feed[0:3] = 0
        print(feed)
        compare_tf_with_tvm(feed, "in_data:0", "output:0", no_gpu=True, mode='vm')