Hi, if you look here, in particular at this function:
```python
def _convert_dense(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    weightList = keras_layer.get_weights()
    weight = etab.new_const(weightList[0].transpose([1, 0]))
    params = {"weight": weight, "units": weightList[0].shape[1]}
    if input_shape is None:
        input_shape = keras_layer.input_shape
    input_dim = len(input_shape)
    # In case of RNN dense, input shape will be (1, 1, n)
    if input_dim > 2:
        input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
        if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1:
            raise tvm.error.OpAttributeInvalid(
                "Input shape {} is not valid for operator Dense.".format(input_shape)
            )
        inexpr = _op.squeeze(inexpr, axis=[0])
    out = _op.nn.dense(data=inexpr, **params)
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[1])
        out = _op.nn.bias_add(out, bias)
    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != "linear":
        out = _convert_activation(out, act_type, etab, data_layout)
    if input_dim > 2:
        out = _op.expand_dims(out, axis=0)
    return out
```
Why is the input shape forced to be (1, 1, n)? I have an RNN that needs a dense layer with input (1, k, n), where k is different from 1. Is this possible?
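For reference, here is a minimal sketch of the case I mean. The sizes `k`, `n`, the units, and the input name are just example assumptions on my side:

```python
# Minimal reproducer sketch: Dense applied to a 3-D input whose middle
# dimension k is not 1. Shapes and the input name are assumptions.
import keras
import tvm.relay as relay

k, n = 4, 8                              # example sizes, k != 1
inp = keras.layers.Input(shape=(k, n))   # batch shape becomes (1, k, n)
out = keras.layers.Dense(16)(inp)        # Keras contracts over the last axis
model = keras.models.Model(inp, out)

# This hits the check above and raises OpAttributeInvalid:
# "Input shape (1, 4, 8) is not valid for operator Dense."
mod, params = relay.frontend.from_keras(model, shape={"input_1": (1, k, n)})
```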
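Mathematically it seems the same squeeze/dense/expand_dims pattern the converter already emits for (1, 1, n) would also work for (1, k, n), since Keras applies Dense to the last axis only. A sketch in plain Relay of what I have in mind (my own example names and shapes, written against a recent TVM, not the frontend's actual code path):

```python
# Sketch of why (1, k, n) could be supported: squeeze the leading 1,
# run an ordinary 2-D dense over (k, n), then expand the batch dim back.
import tvm
from tvm import relay

k, n, units = 4, 8, 16
x = relay.var("x", shape=(1, k, n))
w = relay.var("w", shape=(units, n))

y = relay.squeeze(x, axis=[0])        # (1, k, n) -> (k, n)
y = relay.nn.dense(y, w)              # (k, n) x (units, n)^T -> (k, units)
y = relay.expand_dims(y, axis=0)      # (k, units) -> (1, k, units)

func = relay.Function([x, w], y)
mod = tvm.IRModule.from_expr(func)
print(mod)  # type-checks to an output of shape (1, k, units)
```

So is the `input_shape[1] != 1` restriction intentional, or just a case nobody needed yet?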