Toybrick

关于RK 输入单通道灰度图片要怎么进行设置呢

tof3d

中级会员

积分
210
楼主
发表于 2019-3-6 10:29:14    查看: 37264|回复: 23 | [复制链接]    打印 | 只看该作者
本帖最后由 tof3d 于 2019-3-6 12:18 编辑

# Legacy Caffe (pre-"layer" syntax) input declaration for the network.
name: "set003_net"
input: "data"
# Input blob dimensions, NCHW order: batch=1, channels=1 (grayscale), 128x128.
input_dim: 1
input_dim: 1
input_dim: 128
input_dim: 128

回复

使用道具 举报

tof3d

中级会员

积分
210
沙发
 楼主| 发表于 2019-3-6 10:47:42 | 只看该作者
本帖最后由 tof3d 于 2019-3-6 10:49 编辑

转换   
# Conversion: configure RKNN pre-processing and load the Caffe model.
# channel_mean_value='0 0 0 255' = subtract mean 0 per channel, divide by 255;
# reorder_channel='0 1 2' keeps the channel order unchanged.
rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')

# Fixed: the model path was missing a slash ('..C_deploy_new.prototxt'),
# which would not resolve to the intended parent-directory file.
ret = rknn.load_caffe(model='../C_deploy_new.prototxt',
                      proto='caffe',
                      blobs='../C_new.caffemodel')
if ret != 0:
    print('Load mobilenet_v2 failed! Ret = {}'.format(ret))
    exit(ret)
# Fixed: this print was mis-indented in the original paste (IndentationError);
# it belongs after the error check, on the success path.
print('done')

# Build the model without quantization (duplicate status print removed).
print('--> Building model')
rknn.build(do_quantization=False)
print('done')


测试
     img = cv2.imread('./ec1.jpg')

    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    #img=cv2.resize(img,(128,128))

    img2 = cv2.imread('./ec2.jpg')

    #img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    img=cv2.resize(img,(128,128))

    img2=cv2.resize(img2,(128,128))

    print('--> Init runtime environment')

    ret = rknn.init_runtime()

    if ret != 0:

        print('Init runtime environment failed')

        exit(ret)

    print('done')
怎么改,支持单通道吗



    # Inference

    print('--> Running model')

    #outputs = rknn.inference(inputs=[img])

    outputs = rknn.inference(inputs=[img])
回复

使用道具 举报

tof3d

中级会员

积分
210
板凳
 楼主| 发表于 2019-3-6 11:25:39 | 只看该作者
我将rknn.config(channel_mean_value='0 0 0 1'),但是输出output全部为0,这个是什么问题呢
回复

使用道具 举报

tof3d

中级会员

积分
210
地板
 楼主| 发表于 2019-3-6 12:19:09 | 只看该作者
# Caffe deploy definition for "set003_net".
# Pattern throughout: every Convolution/InnerProduct is followed by a Slice
# along the channel axis (slice_dim: 1) into two equal halves plus an
# Eltwise MAX of the halves -- a Max-Feature-Map (MFM) style activation
# that halves the channel count instead of using ReLU.
name: "set003_net"
# Single-channel 128x128 input (NCHW = 1 x 1 x 128 x 128).
layer {
  name: "input"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 1
      dim: 128
      dim: 128
    }
  }
}
# Stage 1: 5x5 conv (pad 2), 96 channels -> 48 after MFM, then 2x2/2 max-pool.
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    pad: 2
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
# Split conv1's channels into two halves along axis 1.
layer {
  name: "slice1"
  type: "Slice"
  bottom: "conv1"
  top: "slice1_1"
  top: "slice1_2"
  slice_param {
    slice_dim: 1
  }
}
# Element-wise max of the two halves (the MFM activation).
layer {
  name: "etlwise1"
  type: "Eltwise"
  bottom: "slice1_1"
  bottom: "slice1_2"
  top: "eltwise1"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "eltwise1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
# Stage 2: 1x1 bottleneck conv (96) + MFM, then 3x3 conv (192) + MFM, pool.
layer {
  name: "conv2a"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2a"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice2a"
  type: "Slice"
  bottom: "conv2a"
  top: "slice2a_1"
  top: "slice2a_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise2a"
  type: "Eltwise"
  bottom: "slice2a_1"
  bottom: "slice2a_2"
  top: "eltwise2a"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "eltwise2a"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 192
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice2"
  type: "Slice"
  bottom: "conv2"
  top: "slice2_1"
  top: "slice2_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise2"
  type: "Eltwise"
  bottom: "slice2_1"
  bottom: "slice2_2"
  top: "eltwise2"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "eltwise2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
# Stage 3: 1x1 conv (192) + MFM, then 3x3 conv (384) + MFM, pool.
layer {
  name: "conv3a"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3a"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 192
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice3a"
  type: "Slice"
  bottom: "conv3a"
  top: "slice3a_1"
  top: "slice3a_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise3a"
  type: "Eltwise"
  bottom: "slice3a_1"
  bottom: "slice3a_2"
  top: "eltwise3a"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "eltwise3a"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice3"
  type: "Slice"
  bottom: "conv3"
  top: "slice3_1"
  top: "slice3_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise3"
  type: "Eltwise"
  bottom: "slice3_1"
  bottom: "slice3_2"
  top: "eltwise3"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "pool3"
  type: "Pooling"
  bottom: "eltwise3"
  top: "pool3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
# Stage 4: 1x1 conv (384) + MFM, then 3x3 conv (256) + MFM (no pool here).
layer {
  name: "conv4a"
  type: "Convolution"
  bottom: "pool3"
  top: "conv4a"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice4a"
  type: "Slice"
  bottom: "conv4a"
  top: "slice4a_1"
  top: "slice4a_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise4a"
  type: "Eltwise"
  bottom: "slice4a_1"
  bottom: "slice4a_2"
  top: "eltwise4a"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "eltwise4a"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice4"
  type: "Slice"
  bottom: "conv4"
  top: "slice4_1"
  top: "slice4_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise4"
  type: "Eltwise"
  bottom: "slice4_1"
  bottom: "slice4_2"
  top: "eltwise4"
  eltwise_param {
    operation: MAX
  }
}
# Stage 5: 1x1 conv (256) + MFM, then 3x3 conv (256) + MFM, final 2x2 pool.
layer {
  name: "conv5a"
  type: "Convolution"
  bottom: "eltwise4"
  top: "conv5a"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice5a"
  type: "Slice"
  bottom: "conv5a"
  top: "slice5a_1"
  top: "slice5a_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise5a"
  type: "Eltwise"
  bottom: "slice5a_1"
  bottom: "slice5a_2"
  top: "eltwise5a"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "eltwise5a"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice5"
  type: "Slice"
  bottom: "conv5"
  top: "slice5_1"
  top: "slice5_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise5"
  type: "Eltwise"
  bottom: "slice5_1"
  bottom: "slice5_2"
  top: "eltwise5"
  eltwise_param {
    operation: MAX
  }
}
layer {
  name: "pool4"
  type: "Pooling"
  bottom: "eltwise5"
  top: "pool4"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
# Fully-connected embedding layer: 512 outputs -> 256-D feature after MFM.
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "pool4"
  top: "fc1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 512
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "slice_fc1"
  type: "Slice"
  bottom: "fc1"
  top: "slice_fc1_1"
  top: "slice_fc1_2"
  slice_param {
    slice_dim: 1
  }
}
layer {
  name: "etlwise_fc1"
  type: "Eltwise"
  bottom: "slice_fc1_1"
  bottom: "slice_fc1_2"
  top: "eltwise_fc1"
  eltwise_param {
    operation: MAX
  }
}
# Training-time dropout, disabled for deployment (kept for reference).
#layer {
#  name: "drop1"
#  type: "Dropout"
#  bottom: "eltwise_fc1"
#  top: "eltwise_fc1"
#  dropout_param {
#    dropout_ratio: 0.7
#  }
#}

回复

使用道具 举报

seedlin

注册会员

积分
54
5#
发表于 2019-3-6 14:55:37 | 只看该作者
tof3d 发表于 2019-3-6 12:19
name: "set003_net"
layer {
  name: "input"

单通道,你config函数里面参数去掉试试
回复

使用道具 举报

tof3d

中级会员

积分
210
6#
 楼主| 发表于 2019-3-6 15:09:50 | 只看该作者
seedlin 发表于 2019-3-6 14:55
单通道,你config函数里面参数去掉试试

   img = cv2.imread('./Rec1.jpg',0)

    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    img=cv2.resize(img,(128,128))

    img2=cv2.resize(img2,(128,128))

    print('--> Init runtime environment')

    ret = rknn.init_runtime()

    if ret != 0:

        print('Init runtime environment failed')

        exit(ret)

    print('done')



    # Inference

    print('--> Running model')

    import pdb

    pdb.set_trace()

    #outputs = rknn.inference(inputs=[img])

    img=img.reshape(1,128,128)

    outputs = rknn.inference(inputs=[img])
删除以后再调用 rknn.inference 直接报 Segmentation fault (core dumped),请问传进去的 image 要做 shape 转换吗?如果不转换也是报这个错
回复

使用道具 举报

tof3d

中级会员

积分
210
7#
 楼主| 发表于 2019-3-6 15:15:28 | 只看该作者
如图


本帖子中包含更多资源

您需要 登录 才可以下载或查看,没有帐号?立即注册

x
回复

使用道具 举报

tof3d

中级会员

积分
210
8#
 楼主| 发表于 2019-3-6 17:26:16 | 只看该作者
help  啊啊
回复

使用道具 举报

seedlin

注册会员

积分
54
9#
发表于 2019-3-6 17:43:43 | 只看该作者
tof3d 发表于 2019-3-6 15:09
img = cv2.imread('./Rec1.jpg',0)

    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

怎么没看到rknn load_model的代码
回复

使用道具 举报

tof3d

中级会员

积分
210
10#
 楼主| 发表于 2019-3-6 18:53:27 | 只看该作者
seedlin 发表于 2019-3-6 17:43
怎么没看到rknn load_model的代码

没有贴出来
import numpy as np
import cv2
from rknn.api import RKNN
from scipy import spatial


def _load_gray_128(path):
    """Read *path* as single-channel grayscale, resize to 128x128 and add
    the channel dimension (CHW: 1 x 128 x 128) expected for a 1-channel
    model input."""
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (128, 128))
    return img.reshape(1, 128, 128)


if __name__ == '__main__':

    # Create the RKNN object exactly once with verbose logging.
    # Fixed: the original constructed three RKNN() instances, leaking two.
    rknn = RKNN(verbose=True)

    # Pre-processing config is deliberately left disabled while the
    # single-channel input format is being debugged (as in the original);
    # enable one of these once confirmed against the rknn-toolkit docs.
    # rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2')
    # rknn.config(channel_mean_value='0 0 0 1')

    # Load the Caffe model (deploy prototxt + trained weights).
    print('--> Loading model')
    ret = rknn.load_caffe(model='../C_deploy_new.prototxt',
                          proto='caffe',
                          blobs='../C_new.caffemodel')
    if ret != 0:
        print('Load mobilenet_v2 failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Build without quantization (FP inference).
    # Fixed: the original ignored rknn.build()'s return code and printed
    # the "Building model" banner twice.
    print('--> Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('Build model failed! Ret = {}'.format(ret))
        exit(ret)
    print('done')

    # Export the converted model, then reload it for inference.
    print('--> Export RKNN model')
    ret = rknn.export_rknn('../Mobile-ligth.rknn')
    if ret != 0:
        print('Export mobilenet_v2.rknn failed!')
        exit(ret)
    print('done')
    ret = rknn.load_rknn(path='../Mobile-ligth.rknn')

    # Set inputs.  Fixed: the original read ec1.jpg twice and ran a
    # redundant BGR->RGB->GRAY conversion chain; each image is now loaded
    # once, directly as grayscale, and BOTH are reshaped to (1, 128, 128)
    # (the original reshaped only img -- passing the raw 2-D img2 to
    # inference() is the likely cause of the reported segfault).
    img = _load_gray_128('./ec1.jpg')
    img2 = _load_gray_128('./ec2.jpg')

    print('--> Init runtime environment')
    # NOTE(review): when running on a PC host this may need
    # init_runtime(target='rk3399pro') -- confirm for your setup.
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Inference on both images.
    # Fixed: both pdb.set_trace() breakpoints left in the paste removed.
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    outputs2 = rknn.inference(inputs=[img2])
    print(outputs, outputs2[0])

    # Cosine similarity between the two embeddings.
    # Fixed: inference() returns a list of arrays, so compare the
    # flattened first outputs instead of passing the lists themselves.
    result = 1 - spatial.distance.cosine(np.ravel(outputs[0]),
                                         np.ravel(outputs2[0]))
    print(result)
    print('done')

    # Performance evaluation left disabled, as in the original.
    print('--> Begin evaluate model performance')
    # perf_results = rknn.eval_perf(inputs=[img])
    print('done')

    rknn.release()



回复

使用道具 举报

您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

产品中心 购买渠道 开源社区 Wiki教程 资料下载 关于Toybrick


快速回复 返回顶部 返回列表