|
模型为 https://github.com/commaai/openpilot/blob/master/selfdrive/modeld/models/supercombo.onnx ,在PC端转为rknn(因为板端无法使用1.7.6的rknn_toolkit转rknn),在板端运行后,两者输出差别非常大
onnx到rknn的转换代码
# ONNX -> RKNN conversion script: configures, loads, quantizes, and exports
# the supercombo.onnx model for the rk3399pro target.
rknn = RKNN(verbose=True)

print('--> Config model')
# Mean/std normalization is deliberately not set in config(); for a dataset
# file with multiple lines, batch_size must be pinned to 1 for quantization.
rknn.config(target_platform='rk3399pro', optimization_level=3, output_optimize=1, batch_size=1)

# Load model
print('--> Loading model')
ret = rknn.load_onnx(model=ONNX_MODEL)
if ret != 0:
    print('Load model failed!')
    exit(ret)
print('done')

# Build model (quantized against the calibration dataset)
print('--> Building model')
ret = rknn.build(do_quantization=True, dataset='./dataset_npy_hand255.txt')
if ret != 0:
    print('Build model failed!')
    exit(ret)
print('done')

# Export rknn model
print('--> Export rknn model')
ret = rknn.export_rknn(RKNN_MODEL)
# Bug fix: the export return code was previously ignored, so a failed export
# could silently leave a stale or partial .rknn file on disk.
if ret != 0:
    print('Export rknn model failed!')
    exit(ret)
print('done')

# Release the toolkit context (was previously leaked).
rknn.release()
rknn的运行代码
# On-device inference script: loads the converted .rknn model, feeds the six
# supercombo inputs, and dumps the first output to a text file for comparison
# against the ONNX reference.
rknn = RKNN(verbose=True)

ret = rknn.load_rknn(path=RKNN_MODEL)
# Bug fix: this return code was previously unchecked, so a missing/corrupt
# model file would only surface later as a confusing runtime error.
if ret != 0:
    print('Load rknn model failed!')
    exit(ret)

# Init runtime environment on the target board
print('--> Init runtime environment')
ret = rknn.init_runtime(device_id='81ab609370303eac')
if ret != 0:
    print('Init runtime environment failed!')
    exit(ret)
print('done')

# Set inputs — the six supercombo inputs; in2 reuses the same frame buffer
# as in1 (two consecutive identical frames).
in1 = np.load('straight1/straight1.npy')
in2 = in1
in3 = np.load('datas/desire_fp32.npy')
in4 = np.load('datas/traffic_convention_fp32.npy')
in5 = np.load('datas/nav_features_fp32.npy')
in6 = np.load('datas/features_buffer_fp32.npy')

# Inference
print('--> Running model')
print("模型路径", RKNN_MODEL)
output = rknn.inference(inputs=[in1, in2, in3, in4, in5, in6])
# Bug fix: inference() returns None on failure; indexing output[0] would
# then raise an opaque TypeError instead of a clear error message.
if output is None:
    print('Inference failed!')
    exit(1)
output[0] = output[0].reshape((1, -1))
print('inference result: ', output)
# NOTE(review): assumes the model yields a single 6120-element output;
# np.array(output) would build an object array (and reshape would fail)
# if the model had multiple outputs with differing shapes — TODO confirm.
output = np.array(output).reshape(6120,)
np.savetxt("./straight1/rk3399pro_straight1.txt", output)

# Release the runtime context (was previously leaked).
rknn.release()
|
|