import platform
import os
import torch
import numpy as np

from rknn.api import RKNN

import onnx
from onnx_tf.backend import prepare


class LSTM(torch.nn.Module):

    def __init__(self, input_size, hidden_size):
        super(LSTM, self).__init__()

        self._input_size = input_size
        self._hidden_size = hidden_size
        self._fc = torch.nn.Linear(input_size, input_size)
        self._tanh = torch.nn.Tanh()
        self._sigmoid = torch.nn.Sigmoid()

    def forward(self, x, h0, c0):
        # Simplified stand-in for the real LSTM cell while debugging the conversion
        return self._fc(x), self._sigmoid(h0), self._sigmoid(c0)
        # ox, (h1, c1) = self._lstm(x, (h0, c0))
        # return ox, h1, c1


if __name__ == '__main__':

    system = platform.system()

    isize = 128
    hsize = 64
    # m = torch.nn.Linear(isize, hsize)
    m = LSTM(isize, hsize)
    m.eval()
    m.float()

    h0 = torch.full((1, 1, hsize), fill_value=0.15, dtype=torch.float)
    c0 = torch.full((1, 1, hsize // 2), fill_value=0.07, dtype=torch.float)
    x = torch.full((1, isize), fill_value=0.1, dtype=torch.float)
    # x1 = torch.full((1, hsize), fill_value=0.2, dtype=torch.float)
    pt_file = "lstm{%dx%d}.pt" % (isize, hsize)

    rknn = RKNN(verbose=True)
    rknn.config(batch_size=1, epochs=1)
    # Other options tried: quantized_dtype='asymmetric_quantized-u8' / 'dynamic_fixed_point-16',
    # channel_mean_value='0 0 0 1', reorder_channel='0 1 2'

    # pt = torch.jit.trace(m, (x, h0, c0))
    # pt.save(pt_file)
    # rknn.load_pytorch(model=pt_file, input_size_list=[[1, isize]])

    onnx_file = pt_file + '.onnx'
    torch.onnx.export(m, (x, h0, c0), onnx_file,
                      export_params=True,         # store the trained parameter weights inside the model file
                      # do_constant_folding=True, # whether to execute constant folding for optimization
                      opset_version=9,            # the ONNX version to export the model to
                      input_names=['input', 'h0', 'c0'],   # the model's input names
                      output_names=['output', 'h1', 'c1']  # the model's output names
                      # dynamic_axes={'input': {0: 'batch_size'},   # variable length axes
                      #               'output': {0: 'batch_size'}}
                      )
    rknn.load_onnx(model=onnx_file)

    rknn_file = pt_file + '.rknn'
    ret = rknn.build(do_quantization=False, dataset='./dataset.txt')
    rknn.export_rknn(rknn_file)

    print('--> Init runtime environment')

    if system == 'Windows':
        print("rknn run target rk1808")
        ret = rknn.init_runtime(target='rk1808', target_sub_class='AICS')
    else:
        print("rknn run target self")
        ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    ix = x.numpy()
    ih0 = h0.numpy()
    ic0 = c0.numpy()

    tf_model = onnx.load(onnx_file)
    tf_rep = prepare(tf_model)
    tf_out, tf_h1, tf_c1 = tf_rep.run((ix, ih0, ic0))
    tf_out = np.array(tf_out)
    print("onnx out:", ix.shape, ', ', tf_out.shape)
    print(tf_out)

    torch_out, th_h1, th_c1 = m.forward(x, h0, c0)
    print("torch [out]:", x.shape, ', ', torch_out.shape)
    print(torch_out.detach().unsqueeze(0).numpy())

    rknn_out = rknn.inference(inputs=[(ix, ih0, ic0)], data_type='float32', data_format='nchw')  # , inputs_pass_through=[1]
    rknn_out = np.array(rknn_out)
    print("rknn out:", ix.shape, ', ', rknn_out.shape)
    print(rknn_out)

    rknn.release()
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &in_out_num, sizeof(in_out_num));
if (ret < 0) {
    printf(_LINE_MARK_ " rknn_query fail: query in out num, ret=%d\n", ret);
    goto Error;
}
assert(_NInChs == in_out_num.n_input && _NOutChs == in_out_num.n_output);
for (int i = 0; i < _NInChs; i++) {
    inputs_attr[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(inputs_attr[i]), sizeof(inputs_attr[i]));
    if (ret < 0) {
        printf(_LINE_MARK_ " rknn_query fail: query inputs attrs, ret=%d\n", ret);
        goto Error;
    }
}
for (int i = 0; i < _NOutChs; i++) {
    outputs_attr[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(outputs_attr[i]), sizeof(outputs_attr[i]));
    if (ret < 0) {
        printf(_LINE_MARK_ " rknn_query fail: query outputs attrs, ret=%d\n", ret);
        goto Error;
    }
}
jefferyzhang posted on 2020-2-23 11:04:
Why not just try converting the PyTorch model to RKNN directly?
import torch
from rknn.api import RKNN
import numpy as np


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 6, 3)
        self.conv2 = torch.nn.Conv2d(2, 12, 3)
        self.conv3 = torch.nn.Conv2d(3, 24, 3)

    def forward(self, x, y, z):
        x = self.conv1(x)
        y = self.conv2(y)
        z = self.conv3(z)
        return x, y, z


def E_D(vector1, vector2):
    # Euclidean distance between the two flattened outputs
    print(np.linalg.norm(vector1 - vector2))


def cos_d(vector1, vector2):
    # Cosine similarity between the two flattened outputs
    d = np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))
    print(d)


if __name__ == '__main__':
    net = Net()
    i1 = torch.rand(1, 1, 5, 5)
    i2 = torch.rand(1, 2, 7, 7)
    i3 = torch.rand(1, 3, 9, 9)
    trace_model = torch.jit.trace(net, (i1, i2, i3))
    trace_model.save('test.pt')

    rknn = RKNN(verbose=True)
    rknn.config(batch_size=1,
                channel_mean_value='0 1#0 0 1#0 0 0 1',
                reorder_channel='0 1 2#0 1 2#0 1 2',
                epochs=1)
    ret = rknn.load_pytorch(model='test.pt', input_size_list=[[1, 5, 5], [2, 7, 7], [3, 9, 9]])
    # ret = rknn.load_onnx(model='lstm{128x64}.pt.onnx')
    if ret != 0:
        print('Load pytorch model failed!')
        exit(ret)
    ret = rknn.build(do_quantization=False, dataset='./dataset.txt')
    if ret != 0:
        print('Build pytorch failed!')
        exit(ret)
    ret = rknn.init_runtime(target='rk1808', device_id='7e9f3eb02ede60e8')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    rknn_r = rknn.inference(inputs=[i1.numpy(), i2.numpy(), i3.numpy()],
                            data_type='float32',
                            data_format='nchw')
    pytorch_r = net(i1, i2, i3)
    for d1, d2 in zip(rknn_r, pytorch_r):
        d1 = d1.ravel()
        d2 = d2.detach().numpy().ravel()
        E_D(d1, d2)
        cos_d(d1, d2)
        print()
jefferyzhang posted on 2020-2-26 09:50:
Reply from the NPU team:
1. Multiple inputs and multiple outputs are supported. When there are multiple inputs, their shapes can differ, but every input must be passed in NCHW format. For details, refer to m ...
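
To make that concrete, here is a minimal sketch of a multi-input inference call, matching the working example above: each input is its own NCHW numpy array, and the inputs list holds one array per model input rather than a single tuple wrapping all of them. The model file name, input shapes, and target below are placeholders for illustration, not taken from this thread.

import numpy as np
from rknn.api import RKNN

# Minimal sketch, assuming a prebuilt RKNN model with three inputs of
# different shapes. 'multi_input.rknn' and the shapes are placeholders.
rknn = RKNN()
rknn.load_rknn(path='multi_input.rknn')
ret = rknn.init_runtime(target='rk1808')
if ret != 0:
    print('Init runtime environment failed')
    exit(ret)

# One NCHW array per model input, passed as a flat list.
x = np.random.rand(1, 1, 5, 5).astype(np.float32)
y = np.random.rand(1, 2, 7, 7).astype(np.float32)
z = np.random.rand(1, 3, 9, 9).astype(np.float32)

outputs = rknn.inference(inputs=[x, y, z],
                         data_type='float32',
                         data_format='nchw')
# 'outputs' is a list with one numpy array per model output.
rknn.release()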