import os
import urllib
import traceback
import time
import sys
import numpy as np
import cv2
import torch
from rknn.api import RKNN
from blazefacenumpy import BlazeFace
# ONNX_MODEL = 'best.onnx'            # path to the ONNX model
# ONNX_MODEL = 'rppg_new.onnx'
# ONNX_MODEL = 'v3.onnx'
# ONNX_MODEL = 'alexnet.onnx'
ONNX_MODEL = 'blazeface128.onnx'      # ONNX model path actually in effect

TFLITE_MODEL = 'face_detection_front.tflite'
# TFLITE_MODEL = 'alexnet_float32.tflite'

# adb binary shipped with the toolkit:
# /anaconda/envs/horizon_bpu/lib/python3.8/site-packages/rknn/3rdparty/platform-tools/adb/linux-x86_64
# tcpip

# RKNN_MODEL = './yolov8-ghost-pose.rknn'   # path where the converted RKNN model is saved
# RKNN_MODEL = './bz128_3568.rknn'
# RKNN_MODEL = './facenet——3568.rknn'
RKNN_MODEL = './pfldv3.rknn'                # RKNN model actually loaded below
DATASET = './test.txt'                      # path to the dataset file (used for quantization)

QUANTIZE_ON = False                         # whether to quantize
def plot_detections(img, detections):
    """Draw detection boxes (normalized [ymin, xmin, ymax, xmax, ...]) onto img."""
    if isinstance(detections, torch.Tensor):
        detections = detections.cpu().numpy()
    # if detections.ndim == 1:
    #     detections = np.expand_dims(detections, axis=0)
    # print("Found %d faces" % detections.shape[0])

    for i in range(detections.shape[0]):
        ymin = int(detections[i, 0] * img.shape[0])
        xmin = int(detections[i, 1] * img.shape[1])
        ymax = int(detections[i, 2] * img.shape[0])
        xmax = int(detections[i, 3] * img.shape[1])
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (170, 234, 242), 5, lineType=cv2.LINE_AA)

        # if with_keypoints:
        #     for k in range(6):
        #         kp_x = detections[i, 4 + k*2] * img.shape[1]
        #         kp_y = detections[i, 4 + k*2 + 1] * img.shape[0]
        #         circle = patches.Circle((kp_x, kp_y), radius=0.5, linewidth=1,
        #                                 edgecolor="lightskyblue", facecolor="none",
        #                                 alpha=detections[i, 16])
        #         ax.add_patch(circle)
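
# (Optional) keypoint drawing with OpenCV instead of the matplotlib snippet above.
# A minimal sketch, assuming the BlazeFace detection layout of 6 (x, y) keypoints
# at indices 4..15 of each row; the helper name plot_keypoints is hypothetical.
def plot_keypoints(img, detections):
    if isinstance(detections, torch.Tensor):
        detections = detections.cpu().numpy()
    for i in range(detections.shape[0]):
        for k in range(6):
            kp_x = int(detections[i, 4 + k * 2] * img.shape[1])
            kp_y = int(detections[i, 4 + k * 2 + 1] * img.shape[0])
            cv2.circle(img, (kp_x, kp_y), 2, (242, 234, 170), -1, lineType=cv2.LINE_AA)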

if __name__ == '__main__':

    # Create the RKNN object
    rknn = RKNN(verbose=False)

    # Check that the RKNN model file exists (it is what gets loaded below)
    if not os.path.exists(RKNN_MODEL):
        print('model not exist')
        exit(-1)

    # Configure model preprocessing parameters
    print('--> Config model')
    rknn.config(  # reorder_channel='0 1 2',  # channel order is RGB
        mean_values=[[0, 0, 0]],        # per-channel mean, subtracted during preprocessing
        std_values=[[255, 255, 255]],   # per-channel std, each channel is divided by this value
        optimization_level=3,           # optimization level
        quantized_method='layer',
        float_dtype="float16",
        target_platform='RK3568',       # target platform is RK3568
        # quantize_input_node=QUANTIZE_ON,  # whether to quantize the input node
    )
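
    # The config above matches the usual ONNX -> RKNN conversion flow, but this
    # script only loads a pre-built .rknn below. For reference, a minimal
    # conversion sketch, assuming the standard rknn-toolkit2 API
    # (load_onnx / build / export_rknn):
    #
    #   ret = rknn.load_onnx(model=ONNX_MODEL)
    #   ret = rknn.build(do_quantization=QUANTIZE_ON, dataset=DATASET)
    #   ret = rknn.export_rknn(RKNN_MODEL)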

    # Load the pre-built RKNN model and initialize the runtime
    # print('--> Loading model')
    ret = rknn.load_rknn(RKNN_MODEL)
    ret = rknn.init_runtime(target="rk3568")
    print("start")

    frame = cv2.imread('./test.jpg')
    frame = cv2.resize(frame, (112, 112))

    back_net = BlazeFace(back_model=False).to("cpu")

    input_data = np.reshape(frame, -1)

    pred = rknn.inference(inputs=[input_data], data_format='nhwc')
    # Debug: inspect a few raw output values
    kk1 = np.reshape(pred[1], -1)[222:225]
    print(pred[1].astype(np.float16))
    kk = np.reshape(pred[0], -1)[2::10].astype(np.int16)
    print('pred')
    # print('bz load')
    # back_net.load_anchors("anchors.npy")
    # deback = back_net._tensors_to_detections(raw_box_tensor=pred[0], raw_score_tensor=pred[1], anchors=back_net.anchors)
    # # 4. Non-maximum suppression to remove overlapping detections:
    # filtered_detections = []
    # for i in range(len(deback)):
    #     faces = back_net._weighted_non_max_suppression(deback[i])
    #     faces = np.stack(faces) if len(faces) > 0 else np.zeros((0, 17))
    #     filtered_detections.append(faces)
    # plot_detections(frame, filtered_detections[0])
    # cv2.imwrite('test_out.jpg', frame)
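
    # Release the RKNN runtime context when finished (standard rknn-toolkit API)
    rknn.release()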