How can I use TVM quantization?

import tensorflow as tf
import tvm
from tvm import relay
import os
import numpy as np
import glob
from PIL import Image
import tvm.relay.testing.tf as tf_testing
from tvm.contrib import graph_runtime
from tvm.contrib import utils
from tvm.contrib.download import download_testdata
from tvm import rpc

# Prefer the TF1-style API when running under a TF2 install; fall back to the
# top-level module on TF1, where tf.compat.v1 may not exist.
try:
    tf_compat_v1 = tf.compat.v1
except ImportError:
    tf_compat_v1 = tf

# Path to a frozen TensorFlow GraphDef (.pb) of MobileNetV2.
model_path = "/home/mobilev2/mobilenet_v2_1.0_224.pb"

# Load the frozen GraphDef and import it into the default TF graph.
with tf_compat_v1.gfile.GFile(model_path, "rb") as f:
    graph_def = tf_compat_v1.GraphDef()
    graph_def.ParseFromString(f.read())
    # Use the compat alias here too: tf.import_graph_def was removed in TF2,
    # so the original tf.import_graph_def call breaks the TF2 path that the
    # tf_compat_v1 shim above exists to support. The import is done for its
    # side effect on the default graph; the return value is not needed.
    tf_compat_v1.import_graph_def(graph_def, name="")
    # TVM utility that cleans up the GraphDef for the Relay importer.
    graph_def = tf_testing.ProcessGraphDefParam(graph_def)

# Cross-compile target: Raspberry Pi 3B (ARM CPU).
target = tvm.target.arm_cpu("rasp3b")
layout = None  # keep the model's native layout (NHWC for TF models)

# Input tensor name and shape for MobileNetV2 (1x224x224x3, NHWC).
shape_dict = {"input": (1, 224, 224, 3)}

mod, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)

# NOTE(review): if from_tensorflow emits control-flow constructs (If nodes /
# the List ADT), quantization can fail with "There is no definition of List".
# Importing with an explicit `outputs=[...]` restricted to the inference
# output node typically avoids pulling those constructs in — confirm the
# model's output node name before relying on this.
with relay.quantize.qconfig(calibrate_mode="global_scale", global_scale=8.0):
    mod = relay.quantize.quantize(mod, params)

Error message:

Traceback (most recent call last):
  File "rpi_relay_int8.py", line 175, in <module>
    mod = relay.quantize.quantize(mod, params)
  File "/tvm/python/tvm/relay/quantize/quantize.py", line 371, in quantize
    mod = quantize_seq(mod)
  File "/tvm/python/tvm/ir/transform.py", line 127, in __call__
    return _ffi_transform_api.RunPass(self, mod)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 322, in tvm._ffi._cy3.core.PackedFuncBase.__call__
    FuncCall(self.chandle, args, &ret_val, &ret_tcode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 257, in tvm._ffi._cy3.core.FuncCall
    FuncCall3(chandle, args, nargs, ret_val, ret_tcode)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 246, in tvm._ffi._cy3.core.FuncCall3
    CALL(TVMFuncCall(chandle, &values[0], &tcodes[0],
  File "tvm/_ffi/_cython/./base.pxi", line 160, in tvm._ffi._cy3.core.CALL
    raise get_last_ffi_error()
tvm._ffi.base.TVMError: Traceback (most recent call last):
  [bt] (8) /tvm/build/libtvm.so(tvm::relay::MixedModeVisitor::VisitLeaf(tvm::RelayExpr const&)+0x4d) [0x7f198675babd]
  [bt] (7) /tvm/build/libtvm.so(tvm::relay::ExprFunctor<void (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)+0x7b) [0x7f1986705abb]
  [bt] (6) /tvm/build/libtvm.so(tvm::relay::ExprVisitor::VisitExpr_(tvm::relay::IfNode const*)+0x39) [0x7f1986757629]
  [bt] (5) /tvm/build/libtvm.so(tvm::relay::MixedModeVisitor::VisitExpr(tvm::RelayExpr const&)+0x1e2) [0x7f198675be02]
  [bt] (4) /tvm/build/libtvm.so(tvm::relay::MixedModeVisitor::VisitLeaf(tvm::RelayExpr const&)+0x4d) [0x7f198675babd]
  [bt] (3) /tvm/build/libtvm.so(tvm::relay::ExprFunctor<void (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)+0x7b) [0x7f1986705abb]
  [bt] (2) /tvm/build/libtvm.so(tvm::relay::TypeVarEVisitor::VisitExpr_(tvm::ConstructorNode const*)+0x4a) [0x7f19864f4d2a]
  [bt] (1) /tvm/build/libtvm.so(tvm::IRModuleNode::LookupTypeDef(tvm::GlobalTypeVar const&) const+0x15a) [0x7f1985bd2d0a]
  [bt] (0) /tvm/build/libtvm.so(+0x6eefe2) [0x7f1985bcffe2]
  File "/tvm/src/ir/module.cc", line 272
TVMError:
---------------------------------------------------------------
An internal invariant was violated during the execution of TVM.
Please read TVM's error reporting guidelines.
More details can be found here: https://discuss.tvm.ai/t/error-reporting/7793.
---------------------------------------------------------------
  Check failed: it != type_definitions.end() == false: There is no definition of List

Question

I want to do TVM quantization with a .pb file as input (not using the get_network function).

Using only the relay.frontend.from_tensorflow function, the build works fine.

  1. What does this error mean? (Check failed: it != type_definitions.end() == false: There is no definition of List)

  2. How can I use TVM quantization? (input: a .pb file)

2 Likes

I encountered a similar problem when trying to add a quantization step to the from_tensorflow.py example. Are there any updates on this problem? Or is it currently not possible to use quantization on TensorFlow models? Thanks a lot!

This problem can be solved by https://discuss.tvm.apache.org/t/pytorch-quantization-error-during-quantization/7167?u=abraham.