OpenCV DNN changing my input dimension

In the end I don't understand what is going on — the first input works, the second one fails…

    my_net = dnn::readNet("C:/tmp/model2b.onnx");
    int sz[] = { 1,224,224, 3 }; //orig dimension, batch extended
    Mat blob(4, sz,  CV_32F, img.ptr(0));
    my_net.setInput(blob);
    if (my_net.empty())
        cout << "Oops";
    Mat my_ouput1 = my_net.forward(), imgOut;
    cv::resize(my_ouput1.reshape(1, 224), imgOut, img.size()); // NO ERROR
    Mat my_inp = dnn::blobFromImage(img, 1.0 / 256, Size(224, 224),
        (104.00698793, 116.66876762, 122.67891434),
        false, false);
    my_net.setInput(my_inp);
    Mat my_ouput2 = my_net.forward(); // [ERROR:0@11.251] global net_impl.cpp:1182 

[ERROR:0@11.251] global net_impl.cpp:1182 cv::dnn::dnn4_v20221220::Net::Impl::getLayerShapesRecursively Exception message: OpenCV(4.7.0-dev) C:\lib\opencv\modules\dnn\src\layers\convolution_layer.cpp:417: error: (-2:Unspecified error) Number of input channels should be multiple of 3 but got 224 in function ‘cv::dnn::ConvolutionLayerImpl::getMemoryShapes’

Your ONNX model expects channels-last (NHWC) input, but `blobFromImage` always produces an NCHW blob (1×3×224×224). Use `blobFromImageWithParams` instead and set `Image2BlobParams::datalayout` to `DNN_LAYOUT_NHWC`. Also note that `(104.00698793, 116.66876762, 122.67891434)` is a C++ comma expression, not a `Scalar` — wrap the mean values in `Scalar(...)`.

1 Like