Model Inference

This chapter describes how to call the RKNN Python API to run model inference on the Toybrick RK3399Pro development board.

API call flow

(Figure: RKNN API call flow — create the RKNN object, load the RKNN model, initialize the runtime, run inference, then release the object.)
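The steps in the diagram map directly onto a handful of RKNN API calls. As a minimal sketch of just the call sequence — model.rknn and the zero-filled dummy input are placeholders; a complete working example follows in the next section:

import numpy as np
from rknn.api import RKNN

rknn = RKNN()                                    # 1. Create the RKNN execution object
rknn.load_rknn('./model.rknn')                   # 2. Load a converted RKNN model (placeholder path)
ret = rknn.init_runtime(host='rk3399pro')        # 3. Initialize the runtime on the board
if ret != 0:
    exit(ret)
dummy = np.zeros((64, 64, 3), dtype=np.uint8)    # Placeholder input matching the model's input shape
outputs = rknn.inference(inputs=[dummy])         # 4. Run forward inference
rknn.release()                                   # 5. Release resources when done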

Example

import numpy as np
from PIL import Image
from rknn.api import RKNN


# Analyze the model output to get the most probable gesture and its probability
def get_predict(probability):
    data = probability[0][0]
    data = data.tolist()
    max_prob = max(data)
    return data.index(max_prob), max_prob


def load_model():
    # Create an RKNN execution object
    rknn = RKNN()
    # Load the RKNN model
    print('--> Loading model')
    rknn.load_rknn('./digital_gesture.rknn')
    print('Loading model done')
    # Initialize the RKNN runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime(host='rk3399pro')
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')
    return rknn


def predict(rknn):
    im = Image.open("../picture/6_7.jpg")         # Load the image
    im = im.resize((64, 64), Image.ANTIALIAS)     # Resize to 64x64 (use Image.LANCZOS on Pillow >= 10)
    mat = np.asarray(im.convert('RGB'))           # Convert to RGB format
    outputs = rknn.inference(inputs=[mat])        # Run forward inference and get the inference result
    pred, prob = get_predict(outputs)             # Turn the inference result into readable values
    print(prob)
    print(pred)


if __name__ == "__main__":
    rknn = load_model()
    predict(rknn)
    rknn.release()
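inference() returns a list of numpy arrays, one per model output, which is why get_predict() indexes probability[0][0] to reach the score vector for the single input image. The same RKNN object can also be reused for any number of inferences before release() is called; as a rough sketch built on the functions above (the image paths are hypothetical):

def predict_many(rknn, paths):
    # Reuse one initialized runtime for several images
    for path in paths:
        im = Image.open(path).resize((64, 64), Image.ANTIALIAS)
        mat = np.asarray(im.convert('RGB'))
        pred, prob = get_predict(rknn.inference(inputs=[mat]))
        print('%s -> class %d (prob %.3f)' % (path, pred, prob))

# Usage: predict_many(rknn, ['../picture/0_1.jpg', '../picture/6_7.jpg'])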



