Currently we don’t support custom ops. If we ever need them, one possible approach is to simulate the TFLite logic in our FE parser; however, I haven’t yet investigated whether custom ops carry special attributes that our parser framework does not cover.
case kTfLiteBuiltinCustom:
if (custom_name == "Convolution2DTransposeBias") {
return make_unique<Convolution2DTransposeBiasParser>();
}
// Parser for the MediaPipe-style custom op "Convolution2DTransposeBias",
// which fuses a transposed convolution with an optional bias add. Maps the
// node onto the generic CONVOLUTION_TRANSPOSED operation.
class Convolution2DTransposeBiasParser : public TFLiteOperationParser {
 public:
  // Verifies the node is supported: the weights tensor (input #1) must be
  // available as a constant, and the strides in the custom options must pass
  // CheckStrides.
  Status IsSupported(const TfLiteContext* context,
                     const TfLiteNode* tflite_node,
                     const TfLiteRegistration* registration) final {
    RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
    TfLiteTransposeConvParams* tf_options = nullptr;
    RETURN_IF_ERROR(RetrieveCustomInitialData(tflite_node, &tf_options));
    RETURN_IF_ERROR(
        CheckStrides(tf_options->stride_height, tf_options->stride_width));
    return OkStatus();
  }

  // Builds a CONVOLUTION_TRANSPOSED graph node from the TFLite node.
  // The op parameters arrive through custom_initial_data, which may be null;
  // in that case stride falls back to 1x1 and padding to "unknown".
  Status Parse(const TfLiteNode* tflite_node,
               const TfLiteRegistration* registration, GraphFloat32* graph,
               ObjectReader* reader) final {
    auto* node = graph->NewNode();
    node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
    RETURN_IF_ERROR(reader->AddInput(node, 0));
    RETURN_IF_ERROR(reader->AddOutputs(node));

    const auto* params = reinterpret_cast<const TfLiteTransposeConvParams*>(
        tflite_node->custom_initial_data);
    ConvolutionTransposedAttributes attr;
    attr.stride =
        params ? HW(params->stride_height, params->stride_width) : HW(1, 1);
    RETURN_IF_ERROR(reader->ReadTensor(1, &attr.weights));
    reader->ReadTensor(2, &attr.bias).IgnoreError();  // bias is optional

    // Fix: the original dereferenced `params` unconditionally here even
    // though the stride computation above treats it as nullable — a null
    // custom_initial_data would crash. Guard the access the same way.
    UpdatePadding(params ? params->padding : kTfLitePaddingUnknown,
                  graph->FindInputs(node->id)[0]->tensor.shape, &attr);
    node->operation.attributes = std::move(attr);
    return OkStatus();
  }
};