I have a working ONNX model (originally from TensorFlow) that runs fine with OpenCV's Python bindings (cv2). It's a ‘textbook’ CNN, mostly for learning.
In Python it works as expected, per the examples.
In C++, Net::forward() fails with an error suggesting that the rows/cols of my image are getting confused with the number of channels.
Here is the python code:
import glob
import cv2
import numpy as np

IMG_WIDTH = 224
IMG_HEIGHT = 224

def opencv_predict(onnx_full_model_path, class_names, test_directories):
    all_test_files = []
    for test_directory in test_directories:
        for file in glob.glob(test_directory + '/*.jpg'):
            all_test_files.append(file)
    net = cv2.dnn.readNetFromONNX(onnx_full_model_path)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    for test_file in all_test_files:
        image = cv2.imread(test_file)
        image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT))  # dsize is (width, height)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = np.array([image]).astype('float32') / 255.0  # batch of 1, NHWC
        net.setInput(image)
        predictions = net.forward()
        print("PREDICTIONS", predictions)
And here is the C++ code:
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <string>
#include <vector>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main(int argc, char** argv)
{
    std::string onnxFullModelPath = "model.onnx\\model.onnx";
    Net net = readNetFromONNX(onnxFullModelPath);

    string testFolder = "images_training_validate\\tests";
    vector<string> allTestFiles = getFilesWithExt(testFolder, ".jpg");  // my own helper

    int IMG_WIDTH = 224;
    int IMG_HEIGHT = 224;

    for (auto& testFile : allTestFiles)
    {
        Mat image = imread(testFile);
        Mat blob;
        blobFromImage(image, blob, 1.0f / 255.0f, Size(IMG_WIDTH, IMG_HEIGHT), Scalar(), true, false, CV_32F);
        net.setInput(blob);
        Mat prob = net.forward();   // fails here
        cout << prob << endl;
    }
}
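In case it helps, here is a small diagnostic I added inside the loop, right after blobFromImage. My understanding is that blobFromImage always produces an NCHW blob, while the exported TF model's input is NHWC, so I expect it to print 1 x 3 x 224 x 224:

    // Diagnostic sketch: blobFromImage is documented to return an NCHW blob,
    // so for a 224x224 RGB image I expect dims 1 3 224 224 here, whereas the
    // exported model's input signature is (1, 224, 224, 3), i.e. NHWC.
    cout << "blob dims:";
    for (int i = 0; i < blob.dims; ++i)
        cout << " " << blob.size[i];
    cout << endl;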
The TF / Keras model is:
model = Sequential([
    Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    Conv2D(64, 3, padding='same', activation='relu'),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(128, 3, padding='same', activation='relu'),
    Conv2D(128, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(256, 3, padding='same', activation='relu'),
    Conv2D(256, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(512, 3, padding='same', activation='relu'),
    Conv2D(512, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    GlobalAveragePooling2D(),
    Dense(1024, activation='relu'),
    Dropout(.50),
    Dense(len(class_names), activation='softmax')
])
And the call to keras2onnx is:
import tensorflow as tf
import tf2onnx

def keras2onnx(keras_model, onnx_full_model_path):
    onnx_output_rev = 6
    input_spec = (tf.TensorSpec((1, IMG_HEIGHT, IMG_WIDTH, 3), tf.float32, name="input"),)
    onnx_model, _ = tf2onnx.convert.from_keras(keras_model, input_signature=input_spec, opset=onnx_output_rev, output_path=onnx_full_model_path)
    return onnx_model
One more note: I can make forward() run (though, not surprisingly, it does not infer correctly) if I reinterpret my blob so that the COLS x ROWS x CHANNELS order agrees with the order TF / Keras expects, even though neither layout is the array shape each side actually expects.
The code below runs without throwing an exception:
    Mat blob;
    blobFromImage(image, blob, 1.0f / 255.0f, Size(IMG_WIDTH, IMG_HEIGHT), Scalar(), true, false, CV_32F);
    // Reinterpret the same NCHW data with NHWC-style dims (no actual permutation of the data).
    int sz[4] = {1, IMG_WIDTH, IMG_HEIGHT, 3};
    Mat blob2 = Mat(4, sz, CV_32F);
    blob2.data = blob.data;
    net.setInput(blob2);
    Mat prob = net.forward();
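For comparison, here is a sketch of what I think would mirror the Python path more closely, replacing the loop body. My assumption is that an interleaved H x W x 3 float image is already laid out as NHWC for a batch of 1; I have not verified that it infers correctly:

    // Sketch, not verified: build the NHWC input the same way the Python code does,
    // i.e. keep the image interleaved (H x W x 3) and just add a leading batch dim.
    Mat image = imread(testFile);
    resize(image, image, Size(IMG_WIDTH, IMG_HEIGHT));
    cvtColor(image, image, COLOR_BGR2RGB);
    image.convertTo(image, CV_32F, 1.0 / 255.0);        // H x W x 3, float, interleaved
    int nhwcSz[4] = {1, IMG_HEIGHT, IMG_WIDTH, 3};
    Mat nhwcBlob(4, nhwcSz, CV_32F, image.data);        // shares the image's buffer
    net.setInput(nhwcBlob);
    Mat prob = net.forward();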
Thanks for the help!