class Darknet(nn.Module): 
    """YOLOv3 object detection model""" 
 
    def __init__(self, config_path, img_size=448): 
        super(Darknet, self).__init__() 
        self.img_size = img_size 
        self.seen = 0 
        self.blocks = [] 
        self.yolo_layers = [] 
 
        innitial_channel = 1 
        self.block2 = creat_block_maxpool(3, innitial_channel * 1, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1), pool_kernel=(2, 2), pool_stride=(2, 2)) 
        self.block4 = creat_block_maxpool(innitial_channel * 1, innitial_channel * 2, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1), pool_kernel=(2, 2), pool_stride=(2, 2)) 
        self.block6 = creat_block_maxpool(innitial_channel * 2, innitial_channel * 4, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1), pool_kernel=(2, 2), pool_stride=(2, 2)) 
 
        self.block8 = creat_block(innitial_channel * 4, innitial_channel * 8, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block9 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0) 
 
        self.block10 = creat_block(innitial_channel * 8, innitial_channel * 16, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block11 = nn.MaxPool2d(kernel_size=(2, 2), stride=(1, 1), padding=0) 
 
        self.block12 = creat_block(innitial_channel * 16, innitial_channel * 32, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block13 = creat_block(innitial_channel * 32, innitial_channel * 8, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block14 = creat_block(innitial_channel * 8, innitial_channel * 16, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block15_yolo1 = creat_block(innitial_channel * 16, 3*(5+1), conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1))  # 3 anchors x (5 box/objectness values + 1 class)
 
        # block17: route layer taking the output of block13 (in forward, x13 feeds block18)
        self.block18 = creat_block(innitial_channel * 8, innitial_channel * 4, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block19 = torch.nn.ConvTranspose2d(innitial_channel * 4, innitial_channel * 8, kernel_size=3, stride=2, padding=0, output_padding=1, bias=True) 
        # block20: route layer merging the outputs of block19 and block8 (in forward, x20 = x8 + x19)
        self.block21 = creat_block(innitial_channel * 8, innitial_channel * 16, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block22 = creat_block(innitial_channel * 16, innitial_channel * 16, conv_kernel=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1)) 
        self.block23_yolo2 = nn.Conv2d(innitial_channel * 16, 3*(5+1), kernel_size=3, stride=1, padding=1)  # 3 anchors x (5 box/objectness values + 1 class)
 
    def forward(self, x): 
 
        x2 = self.block2(x) 
        x4 = self.block4(x2) 
        x6 = self.block6(x4) 
        x8 = self.block8(x6) 
        x9 = self.block9(x8) 
        x10 = self.block10(x9) 
        x11 = self.block11(x10) 
        x12 = self.block12(x11) 
        x13 = self.block13(x12) 
 
        x14 = self.block14(x13) 
        x15 = self.block15_yolo1(x14) 
 
        x18 = self.block18(x13)  # Returning results computed before x19 converts fine; returning x19 or anything after it fails, i.e. torch.nn.ConvTranspose2d is the problem.
        x19 = self.block19(x18)
        # print(x8.shape, x19.shape)
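        # Shape check for the skip connection below (assuming creat_block is a
        # stride-1, pad-1 3x3 conv block that preserves spatial size and
        # creat_block_maxpool additionally halves it with its 2x2/stride-2 pool;
        # both helpers are defined outside this snippet): with a 448x448 input,
        # x8 is 8 x 56 x 56; x13 and x18 are 27x27 after the stride-1 maxpool in
        # block11; the ConvTranspose2d gives (27 - 1) * 2 + 3 + 1 = 56, so x19 is
        # also 8 x 56 x 56 and the element-wise add below is shape-valid.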
        x20 = x8 + x19 
 
        x21 = self.block21(x20) 
        x22 = self.block22(x21) 
        x23 = self.block23_yolo2(x22) 
        # return x23  # fails during conversion
        return x15, x18  # converts successfully (RKNN-Toolkit version 1.4.0)

RKNN-Toolkit conversion log:

--> config model
done
--> Loading model 
WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0. 
For more information, please see: 
  * https://github.com/tensorflow/co ... 7-contrib-sunset.md 
  * https://github.com/tensorflow/addons 
If you depend on functionality not listed there, please file an issue. 
/usr/local/lib/python3.5/dist-packages/onnx_tf/common/__init__.py:87: UserWarning: FrontendHandler.get_outputs_names is deprecated. It will be removed in future release.. Use node.outputs instead. 
  warnings.warn(message) 
./rknn_models_test2/yoloV3.pt ******************** 
WARNING: Token 'COMMENT' defined, but not used 
WARNING: There is 1 unused token 
E Try match aten::_convolution_at_x19.1 ut0 failed, catch exception! 
E Catch exception when loading pytorch model: ./rknn_models_test2/yoloV3.pt! 
E Traceback (most recent call last): 
E   File "rknn/base/RKNNlib/converter/convert_pytorch.py", line 1567, in rknn.base.RKNNlib.converter.convert_pytorch.convert_pytorch.match_paragraph_and_param 
E   File "rknn/base/RKNNlib/converter/convert_pytorch.py", line 1444, in rknn.base.RKNNlib.converter.convert_pytorch.convert_pytorch._torch_try_match_ruler 
E ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() 
E During handling of the above exception, another exception occurred: 
E Traceback (most recent call last): 
E   File "rknn/api/rknn_base.py", line 657, in rknn.api.rknn_base.RKNNBase.load_pytorch 
E   File "rknn/base/RKNNlib/app/importer/import_pytorch.py", line 104, in rknn.base.RKNNlib.app.importer.import_pytorch.ImportPytorch.run 
E   File "rknn/base/RKNNlib/converter/convert_pytorch.py", line 1590, in rknn.base.RKNNlib.converter.convert_pytorch.convert_pytorch.match_paragraph_and_param 
E   File "rknn/api/rknn_log.py", line 312, in rknn.api.rknn_log.RKNNLog.e 
E ValueError: Try match aten::_convolution_at_x19.1 ut0 failed, catch exception! 
Load pytorch model failed! 
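
For reference, a minimal sketch (an assumption, not shown in the original post) of how the ./rknn_models_test2/yoloV3.pt file loaded above would typically be produced: RKNN-Toolkit 1.x's PyTorch importer expects a traced TorchScript model, so the Darknet module is traced with a dummy input and saved.

import torch

# Hypothetical export step (assumption): trace the model with a dummy
# 448x448 input and save it as TorchScript for RKNN-Toolkit to load.
# config_path is unused in the snippet above, so None is passed here.
model = Darknet(config_path=None, img_size=448)
model.eval()
dummy = torch.randn(1, 3, 448, 448)
traced = torch.jit.trace(model, dummy)
traced.save('./rknn_models_test2/yoloV3.pt')
# On the RKNN side this file is then passed to rknn.load_pytorch()
# together with the input size list, as in the toolkit's examples.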
 