I managed to get it to compile on an arm64 Linux Docker image; I used the TensorFlow one from Arm (armswdev/tensorflow-arm-neoverse:r22.09-tf-2.10.0-eigen, which uses Python 3.8). I am still in the process of building the Arm image in the TVM repo for later work, but it's painfully slow on an Apple M1.
I am sharing the final script in case it helps — I basically followed @r.stahl's advice.
from PIL import Image
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input, ResNet50
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
import tvm
from tvm import relay
from backend import VanillaAcceleratorBackend
from tvm.relay import transform
from tvm.testing.aot import (
AOTTestModel as AOTModel,
generate_ref_data,
compile_and_run,
)
from tvm.contrib.download import download_testdata
def load_test_image(size=224):
    """Download the reference cat image and return it resized to (size, size).

    Parameters
    ----------
    size : int
        Target width and height in pixels (default 224, ResNet50's input size).

    Returns
    -------
    PIL.Image.Image
        The resized test image.
    """
    url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    # download_testdata caches the file locally under the "data" module dir.
    local_path = download_testdata(url, "cat.png", module="data")
    return Image.open(local_path).resize((size, size))
def main():
    """Build ResNet50 with the UMA VanillaAccelerator backend and run the AOT test flow.

    Steps: construct a fixed-shape Keras model, import it into Relay, generate
    reference outputs from a real preprocessed image, partition the module for
    the accelerator, and compile-and-run with TVM's AOT test harness.
    """
    # Keep the build directory around so the generated sources can be inspected.
    build_dir = tvm.contrib.utils.tempdir(keep_for_debug=True).path
    print(f"Exporting build to {build_dir}")

    # AOT configuration: C interface with the unpacked calling convention.
    use_unpacked_api = True
    interface_api = "c"
    runner = AOT_DEFAULT_RUNNER

    # The model input shape must be fixed before importing into TVM, hence the
    # explicit tf.keras.Input wrapper around ResNet50.
    model = tf.keras.Sequential([
        tf.keras.Input([224, 224, 3], 1, name="input"),
        ResNet50(),
    ])
    input_shapes = dict(input0=(1, 224, 224, 3))
    relay_mod, params = relay.frontend.from_keras(model, input_shapes, layout='NHWC')
    relay_mod = transform.InferType()(relay_mod)

    # Register the UMA accelerator backend and set up the two targets:
    # the accelerator target (with a C host) plus a plain C target.
    backend = VanillaAcceleratorBackend()
    backend.register()
    accel_target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
    c_target = tvm.target.Target("c")

    # Use a real image rather than random data: generate_ref_data compares the
    # reference output against the compiled output, so the input must produce
    # well-defined values.
    image = np.array(load_test_image())[np.newaxis, :].astype("float32")
    preprocessed = preprocess_input(image)
    inputs = {"input0": preprocessed}
    ref_outputs = generate_ref_data(relay_mod, inputs, params)

    # Partition for the accelerator only after the reference data is captured.
    relay_mod = backend.partition(relay_mod)

    test_model = AOTModel(module=relay_mod, inputs=inputs, outputs=ref_outputs, params=params)
    compile_and_run(
        test_model,
        runner,
        interface_api,
        use_unpacked_api,
        workspace_byte_alignment=1,
        debug_calculated_workspaces=False,
        target=[c_target, accel_target],
        test_dir=build_dir,
    )
    print(f"Build exported to {build_dir}")
# Script entry point: run the full build-and-test flow when executed directly.
if __name__ == "__main__":
    main()
One thing that bugged me for a while: I was initially trying with a fresh instance of Conv2D and random input data as an example, but this was producing errors, because compile_and_run records the original model's output and compares it to the compiled model's output — and in the random case the original output contained NaN values, which caused issues during testing.
I will try again on native macOS to see if I can get it to work now and will post updates here, hopefully soon, after which this post can be closed.