While converting ONNX models with rknn_toolkit 1.3.0, I found that the shapes of ConvTranspose and PRelu are handled incorrectly.
The test script is as follows:
```python
import os

import onnx
from onnx import optimizer, shape_inference, version_converter, TensorProto
import onnxruntime
import numpy as np
import cv2
# import torch
from rknn.api import RKNN

np.random.seed(100)
# torch.manual_seed(100)


class ONNXModule(object):
    """Small helper that assembles an ONNX model from node/tensor lists."""

    def __init__(self, name):
        super().__init__()
        self._name = name
        self._node_list, self._init_tensor, self._inpt_tensor, self._out_tensor = [], [], [], []
        self._graph = None

    @property
    def name(self):
        return self._name

    @property
    def model(self):
        inpt_value_info = []
        out_value_info = []
        init_value_data = []
        for init_t in self._inpt_tensor + self._init_tensor:
            inpt_value_info.append(onnx.helper.make_tensor_value_info(
                init_t[0], init_t[2], tuple(init_t[1])))
        for init_t in self._init_tensor:
            # Initializers are filled with fixed random weights (seeded above).
            init_value_data.append(onnx.helper.make_tensor(
                init_t[0], init_t[2], tuple(init_t[1]),
                np.random.randn(
                    *init_t[1]).astype(np.float32, order='C').flatten()))
        for init_t in self._out_tensor:
            out_value_info.append(onnx.helper.make_tensor_value_info(
                init_t[0], init_t[2], tuple(init_t[1])))
        _graph = onnx.helper.make_graph(
            self._node_list, self._name,
            inpt_value_info, out_value_info, init_value_data
        )
        return onnx.helper.make_model(_graph)

    def __call__(self, inpt):
        pass


class M0(ONNXModule):
    # Good one: Conv + Relu converts and runs correctly.
    def __init__(self):
        super().__init__('M0')
        # 1x3x10x10
        # self.conv = torch.nn.Conv2d(
        #     3, 13, kernel_size=(2, 2), stride=2)
        conv = onnx.helper.make_node(
            'Conv',
            inputs=['input', 'conv_W', 'conv_b'],
            outputs=['conv_1'],
            kernel_shape=[2, 2],
            strides=[2, 2]
        )
        # 1x13x5x5
        # self.relu = torch.nn.ReLU()
        relu = onnx.helper.make_node(
            'Relu',
            inputs=['conv_1'],
            outputs=['relu_1'],
        )
        self._node_list.extend([conv, relu])
        self._inpt_tensor.append(('input', [1, 3, 10, 10], TensorProto.FLOAT))
        self._init_tensor.append(('conv_W', [13, 3, 2, 2], TensorProto.FLOAT))
        self._init_tensor.append(('conv_b', [13], TensorProto.FLOAT))
        self._out_tensor.append(('relu_1', [1, 13, 5, 5], TensorProto.FLOAT))

    def __call__(self, inpt):
        pass
        # x = self.conv(inpt)
        # x = self.relu(x)
        # return x


class M1(ONNXModule):
    # Deconv Out Channel Error
    def __init__(self):
        super().__init__('M1')
        # 1x3x10x10
        # self.deconv = torch.nn.ConvTranspose2d(
        #     3, 13, kernel_size=(2, 2), stride=2)
        deconv = onnx.helper.make_node(
            'ConvTranspose',
            inputs=['input', 'deconv_W', 'deconv_b'],
            outputs=['deconv_1'],
            kernel_shape=[2, 2],
            strides=[2, 2]
        )
        # 1x13x20x20
        # self.relu = torch.nn.ReLU()
        relu = onnx.helper.make_node(
            'Relu',
            inputs=['deconv_1'],
            outputs=['relu_1'],
        )
        self._node_list.extend([deconv, relu])
        self._inpt_tensor.append(('input', [1, 3, 10, 10], TensorProto.FLOAT))
        self._init_tensor.append(
            ('deconv_W', [3, 13, 2, 2], TensorProto.FLOAT))
        self._init_tensor.append(('deconv_b', [13], TensorProto.FLOAT))
        self._out_tensor.append(('relu_1', [1, 13, 20, 20], TensorProto.FLOAT))

    def forward(self, inpt):
        pass
        # x = self.deconv(inpt)
        # x = self.relu(x)
        # return x


class M2(ONNXModule):
    # PReLU error
    def __init__(self):
        super().__init__('M2')
        # 1x3x10x10
        # self.conv = torch.nn.Conv2d(
        #     3, 6, kernel_size=(3, 3), stride=1, padding=1)
        conv = onnx.helper.make_node(
            'Conv',
            inputs=['input', 'conv_W', 'conv_b'],
            outputs=['conv_1'],
            kernel_shape=[3, 3],
            strides=[1, 1],
            pads=[1, 1, 1, 1]
        )
        # 1x6x10x10
        # self.prelu = torch.nn.PReLU(6)
        prelu = onnx.helper.make_node(
            'PRelu',
            inputs=['conv_1', 'slop_1'],
            outputs=['prelu_1']
        )
        self._node_list.extend([conv, prelu])
        self._inpt_tensor.append(('input', [1, 3, 10, 10], TensorProto.FLOAT))
        self._init_tensor.append(('conv_W', [6, 3, 3, 3], TensorProto.FLOAT))
        self._init_tensor.append(('conv_b', [6], TensorProto.FLOAT))
        self._init_tensor.append(('slop_1', [1, 6, 1, 1], TensorProto.FLOAT))
        self._out_tensor.append(('prelu_1', [1, 6, 10, 10], TensorProto.FLOAT))

    def forward(self, inpt):
        pass
        # x = self.conv(inpt)
        # x = self.prelu(x)
        # return x


class M3(ONNXModule):
    # Sub error
    def __init__(self):
        super().__init__('M3')
        # 1x3x10x10
        conv_1 = onnx.helper.make_node(
            'Conv',
            inputs=['input', 'conv_1W', 'conv_1b'],
            outputs=['conv_1'],
            kernel_shape=[2, 2],
            strides=[2, 2],
            pads=[0, 0, 0, 0]
        )
        # 1x3x10x10
        max_pool = onnx.helper.make_node(
            'MaxPool',
            inputs=['input'],
            outputs=['maxpool_1'],
            kernel_shape=[2, 2],
            strides=[2, 2]
        )
        sub = onnx.helper.make_node(
            'Sub',
            inputs=['maxpool_1', 'conv_1'],
            outputs=['result']
        )
        self._node_list.extend([conv_1, max_pool, sub])
        self._inpt_tensor.append(('input', [1, 3, 10, 10], TensorProto.FLOAT))
        self._init_tensor.append(('conv_1W', [3, 3, 2, 2], TensorProto.FLOAT))
        self._init_tensor.append(('conv_1b', [3], TensorProto.FLOAT))
        self._out_tensor.append(('result', [1, 3, 5, 5], TensorProto.FLOAT))

    def forward(self, inpt):
        pass


def export_onnx(inst_, input_, name):
    output = inst_(input_)
    onnx_m = inst_.model
    onnx_m = shape_inference.infer_shapes(onnx_m)
    onnx_m = optimizer.optimize(onnx_m)
    onnx.checker.check_model(onnx_m)
    onnx.save(onnx_m, name)


def export_rknn(onnx_, rknn_, dataset_f='./dataset.txt'):
    assert os.path.exists(onnx_), f"onnx file '{onnx_}' not exist"
    rknn = RKNN()
    rknn.config(
        channel_mean_value='127.5 127.5 127.5 127.5', reorder_channel='0 1 2',
        quantized_dtype='dynamic_fixed_point-16',
        epochs=1000)
    rknn.load_onnx(model=onnx_)
    rknn.build(do_quantization=True, dataset=dataset_f)
    rknn.export_rknn(rknn_)
    rknn.release()


def run_rknn(rknn_, onnx_, inpt):
    rknn = RKNN()
    rknn.config(
        channel_mean_value='127.5 127.5 127.5 127.5', reorder_channel='0 1 2')
    rknn.load_rknn(rknn_)
    rknn.init_runtime()
    rk_out = rknn.inference([inpt], data_format='nchw')
    # Apply the same mean/scale normalization manually for the onnxruntime reference.
    t_inpt = inpt.astype(np.float32, order='C')
    t_inpt -= np.array([127.5, 127.5, 127.5], dtype=np.float32).reshape(1, 3, 1, 1)
    t_inpt /= 127.5
    # with torch.no_grad():
    #     th_out = inst_(t_inpt)
    onrt = onnxruntime.InferenceSession(onnx_, None)
    th_out = onrt.run([], {'input': t_inpt})
    if isinstance(th_out, (list, tuple)):
        pass
    else:
        th_out = [th_out]
    # th_out = [t.cpu().numpy() for t in th_out]
    for rk_o, th_o in zip(rk_out, th_out):
        print(rk_o.shape, th_o.shape)
        assert rk_o.size == th_o.size, f"size mismatch ({rk_o.size}) ({th_o.size})"
        rk_o = rk_o.reshape(th_o.shape)
        print('abs mean: ', np.abs(rk_o - th_o).mean())
    # rknn.release()


# Build a tiny random quantization dataset and a test image.
DATASET_NUM = 1
# dataset_file = open('Data/TOM/')
all_img = np.random.randint(0, high=256, size=(
    DATASET_NUM, 10, 10, 3), dtype=np.uint8)
dataset_f = open('./dataset.txt', 'w')
os.makedirs('input', exist_ok=True)
for i in range(DATASET_NUM):
    img = all_img[i]
    cv2.imwrite(f'input/{i}.png', img)
    dataset_f.write(f'input/{i}.png\n')
dataset_f.close()

test_data = cv2.imread('input/0.png')
test_data = cv2.cvtColor(test_data, cv2.COLOR_BGR2RGB)
test_data = np.transpose(
    test_data[None, :, :, :], (0, 3, 1, 2)).copy()  # input used for test
# input_data = torch.from_numpy(test_data).float()  # input used for export
input_data = test_data

M0_inst = M0()
M1_inst = M1()
M2_inst = M2()
# M3_inst = M3()
m_l = [M0_inst, M1_inst, M2_inst]
n_l = [m.name for m in m_l]
ox_l = [m.name + '.onnx' for m in m_l]
rk_l = [m.name + '.rknn' for m in m_l]

for m, ox in zip(m_l, ox_l):
    export_onnx(m, input_data, ox)

for ox, rk, m, n in zip(ox_l, rk_l, m_l, n_l):
    print(f"=====start {n}========")
    try:
        export_rknn(ox, rk)
        print(f"{n} export_rknn ok")
        run_rknn(rk, ox, test_data)
        print(f"{n} run_rknn ok")
    except (Exception, TypeError, NameError) as ex:
        print(ex)
        print(f"{n} failed")
    finally:
        print(f"======end {n}=========")
        print()
```
M1 uses ConvTranspose and M2 uses PRelu; they fail with the following errors, respectively:
M1:
E ValueError: output_shape does not match filter's output channels, 3 != 13
M2:
E ValueError: Dimensions must be equal, but are 6 and 10 for 'PRelu_prelu_1_1_1/mul' (op: 'Mul') with input shapes: [1,6,1,1], [1,10,10,6].
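For the M1 error: the ONNX spec lays out ConvTranspose weights as (C_in, C_out/groups, kH, kW), so `deconv_W` with shape [3, 13, 2, 2] means 3 input channels and 13 output channels. The "3 != 13" message therefore looks like the converter reads dimension 0 of the weight as the output-channel count, which is the Conv convention rather than the ConvTranspose one. As a sanity check outside of rknn_toolkit, here is a minimal sketch (assuming M1.onnx was produced by the script above) showing that onnxruntime accepts the model and returns the expected 13-channel output:

```python
# Sanity check: M1.onnx is valid ONNX and yields 13 output channels under
# onnxruntime, so the "3 != 13" failure appears to be on the converter side.
import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession('M1.onnx', None)
x = np.random.randn(1, 3, 10, 10).astype(np.float32)
(out,) = sess.run(None, {'input': x})
print(out.shape)  # expected: (1, 13, 20, 20)
```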
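For the M2 error: the shapes in the message suggest the feature map was transposed to NHWC ([1, 10, 10, 6]) while the PRelu slope kept its NCHW-style shape ([1, 6, 1, 1]), so the underlying elementwise multiply cannot broadcast. The numpy sketch below is only my reading of the message, not taken from the toolkit's source; it reproduces the same broadcasting failure:

```python
# PRelu slope broadcasting: valid against the NCHW feature map, but not
# against the same tensor transposed to NHWC, which matches the error above.
import numpy as np

slope = np.random.randn(1, 6, 1, 1).astype(np.float32)
x_nchw = np.random.randn(1, 6, 10, 10).astype(np.float32)

# ONNX semantics: the slope broadcasts over the channel axis of the NCHW input.
y = np.where(x_nchw > 0, x_nchw, slope * x_nchw)
print(y.shape)  # (1, 6, 10, 10)

# What the error message suggests happens inside the converter:
x_nhwc = x_nchw.transpose(0, 2, 3, 1)  # (1, 10, 10, 6)
try:
    _ = slope * x_nhwc  # (1, 6, 1, 1) * (1, 10, 10, 6) -> broadcast error
except ValueError as ex:
    print(ex)
```

If that reading is right, the slope would need to be transposed along with the feature map inside the converter; changing the ONNX model itself (e.g. a 1-D slope of shape [6]) would not broadcast correctly against the NCHW feature map on the ONNX side.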