Hello, I'm a new user of TVM.
I built a LeNet-5 model in Keras, then loaded its graph and weights and converted it with the Relay Keras frontend:
import keras
import tvm
from tvm import relay

# Load the serialized Keras model and its weights
json_file = open("lenet_model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
Network = keras.models.model_from_json(loaded_model_json)
Network.load_weights("Weight.h5")
Network.compile(loss=keras.losses.categorical_crossentropy,
                optimizer=keras.optimizers.Adam(),
                metrics=['accuracy'])

# Convert to Relay; the input layout is NCHW with batch size 1000
batch = 1000
dic = (batch, 1, 28, 28)
shape_dict = {'conv2d_1_input': dic}
mod, Tparams = tvm.relay.frontend.from_keras(Network, shape_dict)
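For reference, the Tparams returned by from_keras is a dict mapping parameter names to tvm.nd.NDArray values; a quick way to see which names the frontend generated (the v_param_* names that show up in the IR below) is:

for name, arr in Tparams.items():
    print(name, arr.shape)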
When I print mod, the result is:
def @main(%conv2d_1_input: Tensor[(1000, 1, 28, 28), float32], %v_param_1: Tensor[(6, 1, 3, 3), float32], %v_param_2: Tensor[(6), float32], %v_param_3: Tensor[(16, 6, 3, 3), float32], %v_param_4: Tensor[(16), float32], %v_param_5: Tensor[(120, 400), float32], %v_param_6: Tensor[(120), float32], %v_param_7: Tensor[(84, 120), float32], %v_param_8: Tensor[(84), float32], %v_param_9: Tensor[(10, 84), float32], %v_param_10: Tensor[(10), float32]) -> Tensor[(1000, 10), float32] {
%0 = nn.conv2d(%conv2d_1_input, %v_param_1, channels=6, kernel_size=[3, 3]) /* ty=Tensor[(1000, 6, 26, 26), float32] */;
%1 = nn.bias_add(%0, %v_param_2) /* ty=Tensor[(1000, 6, 26, 26), float32] */;
%2 = nn.relu(%1) /* ty=Tensor[(1000, 6, 26, 26), float32] */;
%3 = nn.avg_pool2d(%2, pool_size=[2, 2], strides=[2, 2]) /* ty=Tensor[(1000, 6, 13, 13), float32] */;
%4 = nn.conv2d(%3, %v_param_3, channels=16, kernel_size=[3, 3]) /* ty=Tensor[(1000, 16, 11, 11), float32] */;
%5 = nn.bias_add(%4, %v_param_4) /* ty=Tensor[(1000, 16, 11, 11), float32] */;
%6 = nn.relu(%5) /* ty=Tensor[(1000, 16, 11, 11), float32] */;
%7 = nn.avg_pool2d(%6, pool_size=[2, 2], strides=[2, 2]) /* ty=Tensor[(1000, 16, 5, 5), float32] */;
%8 = transpose(%7, axes=[0, 2, 3, 1]) /* ty=Tensor[(1000, 5, 5, 16), float32] */;
%9 = nn.batch_flatten(%8) /* ty=Tensor[(1000, 400), float32] */;
%10 = nn.dense(%9, %v_param_5, units=120) /* ty=Tensor[(1000, 120), float32] */;
%11 = nn.bias_add(%10, %v_param_6) /* ty=Tensor[(1000, 120), float32] */;
%12 = nn.relu(%11) /* ty=Tensor[(1000, 120), float32] */;
%13 = nn.dense(%12, %v_param_7, units=84) /* ty=Tensor[(1000, 84), float32] */;
%14 = nn.bias_add(%13, %v_param_8) /* ty=Tensor[(1000, 84), float32] */;
%15 = nn.relu(%14) /* ty=Tensor[(1000, 84), float32] */;
%16 = nn.dense(%15, %v_param_9, units=10) /* ty=Tensor[(1000, 10), float32] */;
%17 = nn.bias_add(%16, %v_param_10) /* ty=Tensor[(1000, 10), float32] */;
nn.softmax(%17, axis=1) /* ty=Tensor[(1000, 10), float32] */
}
I then built the same LeNet-5 graph by hand with the Relay API:
conv2d_1_input = relay.var("conv2d_1_input",relay.TensorType((1000,1,28,28),"float32"))
v_param_1 = relay.var("v_param_1",relay.TensorType((6,1,3,3),"float32"))
v_param_2 = relay.var("v_param_2",relay.TensorType((6,),"float32"))
v_param_3 = relay.var("v_param_3",relay.TensorType((16,6,3,3),"float32"))
v_param_4 = relay.var("v_param_4",relay.TensorType((16,),"float32"))
v_param_5 = relay.var("v_param_5",relay.TensorType((120,400),"float32"))
v_param_6 = relay.var("v_param_6",relay.TensorType((120,),"float32"))
v_param_7 = relay.var("v_param_7",relay.TensorType((84,120),"float32"))
v_param_8 = relay.var("v_param_8",relay.TensorType((84,),"float32"))
v_param_9 = relay.var("v_param_9",relay.TensorType((10,84),"float32"))
v_param_10 = relay.var("v_param_10",relay.TensorType((10,),"float32"))
l0 = relay.nn.conv2d(conv2d_1_input, v_param_1, channels=6, kernel_size=[3, 3])
l1 = relay.nn.bias_add(l0, v_param_2)
l2 = relay.nn.relu(l1)
l3 = relay.nn.avg_pool2d(l2, pool_size=[2, 2], strides=[2, 2])
l4 = relay.nn.conv2d(l3, v_param_3, channels=16, kernel_size=[3, 3])
l5 = relay.nn.bias_add(l4, v_param_4)
l6 = relay.nn.relu(l5)
l7 = relay.nn.avg_pool2d(l6, pool_size=[2, 2], strides=[2, 2])
l8 = relay.transpose(l7, axes=[0, 2, 3, 1])
l9 = relay.nn.batch_flatten(l8)
l10 = relay.nn.dense(l9, v_param_5, units=120)
l11 = relay.nn.bias_add(l10, v_param_6)
l12 = relay.nn.relu(l11)
l13 = relay.nn.dense(l12, v_param_7, units=84)
l14 = relay.nn.bias_add(l13, v_param_8)
l15 = relay.nn.relu(l14)
l16 = relay.nn.dense(l15, v_param_9, units=10)
l17 = relay.nn.bias_add(l16, v_param_10)
last = relay.nn.softmax(l17, axis=1)
Args = [ conv2d_1_input ]
Func = relay.Function(Args, last)
delta = relay.Module.from_expr(Func)
When I print delta, the output is identical to mod. However, when I compile it, the parameters from Tparams are not mapped onto delta's parameters:
target = "llvm"  # or whichever target you are building for
with relay.transform.build_config(opt_level=0):
    graph, lib, params = tvm.relay.build_module.build(delta, target=target, params=Tparams)
print(params)  # printed result -> {}
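In case it helps, this is how I check which parameters the function inside delta actually declares (assuming delta["main"] returns the function shown above):

print([p.name_hint for p in delta["main"].params])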
My question is: how can I manually map the parameters onto a Relay function/module that is built by hand like this?
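For example, I wondered whether I should make every free variable an explicit function parameter and let the params argument bind them by name, along the lines of the sketch below (the relay.analysis.free_vars call is copied from the Relay examples; I am not sure this is the intended approach):

# Collect all free vars (the input plus v_param_1 ... v_param_10) as parameters,
# so relay.build can match them against the names in Tparams.
Func = relay.Function(relay.analysis.free_vars(last), last)
delta = relay.Module.from_expr(Func)
with relay.transform.build_config(opt_level=0):
    graph, lib, params = tvm.relay.build_module.build(delta, target=target, params=Tparams)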