|
板凳
楼主 |
发表于 2019-4-9 09:33:48
|
只看该作者
import numpy as np
import tensorflow as tf
import cv2
import time
import os
import cv2
from rknn.api import RKNN
def load_model():
    """Build and initialise the RKNN runtime from a pre-converted model file.

    Loads ``./net_layers.rknn``, configures the input pre-processing
    (zero mean, divide by 255, RGB channel order) and starts the runtime.
    Terminates the process if runtime initialisation fails.

    Returns:
        The ready-to-use RKNN instance.
    """
    rknn = RKNN(verbose=True)

    # Input pre-processing applied on-device before inference.
    rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')

    # The model was already converted offline, so load the .rknn directly.
    print('--> Loading model')
    rknn.load_rknn('./net_layers.rknn')
    print('done')

    print('--> Init runtime environment')
    status = rknn.init_runtime()
    if status != 0:
        print('Init runtime environment failed')
        exit(status)
    print('done')

    return rknn
def recognize(rknn, img):
    """Run a single video frame through the network.

    The frame is resized to 640x360, the central 320x180 region is cropped
    and cast to float32, then fed to the RKNN runtime.  The raw output is
    reshaped to a (70, 140, 1) map and broadcast-multiplied by [0, 0, 1],
    yielding a (70, 140, 3) image whose first two channels are zero.

    Returns:
        A tuple ``(crop, annotation)`` of the float32 input crop and the
        (70, 140, 3) annotation map.
    """
    resized = cv2.resize(img, (640, 360))
    crop = resized[90:270, 160:480].astype('float32')

    raw = rknn.inference(inputs=[crop])
    annotation = np.transpose(raw[0].reshape((1, 70, 140)), (1, 2, 0))

    # Broadcasting (70, 140, 1) * (3,) keeps the prediction in the blue
    # channel only, so imshow renders it as a single-colour overlay.
    return crop, annotation * [0, 0, 1]
def main(rknn):
    """Read frames from a video source, run inference on each one and
    display the input crop next to the predicted annotation map.

    Reports total elapsed time and average processed FPS when the loop
    ends.  Press 'q' in an OpenCV window to quit early.

    Args:
        rknn: An initialised RKNN runtime (as returned by load_model()).
    """
    cap = cv2.VideoCapture('./output3.mp4')
    # cap = cv2.VideoCapture('/dev/video1', cv2.CAP_V4L)
    print(cap.isOpened())
    fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))  # FourCC codec of the source
    fps_video = cap.get(cv2.CAP_PROP_FPS)  # frame rate of the source
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')  # MPEG-4 codec
    # fps_video = 10
    size_video = (640, 180)
    name_video = 'grass' + '.avi'
    # out_video_annotations = cv2.VideoWriter(os.path.join('./', name_video), fourcc, fps_video, size_video)

    # FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the recommended replacement for elapsed-time measurement.
    t0 = time.perf_counter()
    count = 0
    while True:
        ret, image = cap.read()
        if not ret:
            # FIX: end of the video file (or camera failure) — stop instead
            # of spinning forever on failed reads.
            break
        img, output = recognize(rknn, image)
        print(output)
        # Upsample the 70x140 map by 2x on both axes to roughly match the frame.
        output = np.repeat(np.repeat(output, 2, axis=1), 2, axis=0)[:360, :640]
        cv2.imshow('JPEGImages', img)
        cv2.imshow('Annotations', output)
        # output = np.concatenate((img, output), axis=1)
        # out_video_annotations.write((255*output).astype('uint8'))
        count += 1
        if cv2.waitKey(10) == ord('q'):
            break

    dt = time.perf_counter() - t0
    # Guard against division by zero when no frame was processed.
    print(dt, count / dt if dt > 0 else 0.0)

    # out_video_annotations.release()
    cap.release()
    rknn.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Build the runtime once, then hand it to the processing loop.
    model = load_model()
    main(model)
现在这个输出是缩小的：理论上 same 填充应该得到 90x160，实际得到的却是 valid 填充的 70x140。
|