Hi, I am eager to convert the following code to the OpenCV GPU (CUDA) module. What is the correct way to do that?

import os
import cv2
import time
import numpy as np

from google.colab import drive

drive.mount('/content/gdrive')

def normalize_mat(depth_src):
    # Scale the depth map so its values span [0, 1]
    depth_min = depth_src.min()
    depth_max = depth_src.max()
    depth = (depth_src - depth_min) / (depth_max - depth_min)

    return depth

def generate_stereo(depth_dir, depth_prefix, filename):
    print("=== Start processing:", filename, "===")
    depth_src = cv2.imread(os.path.join(depth_dir, depth_prefix + filename + ".jpg"))

    # Collapse 3-channel input to a single gray channel
    if len(depth_src.shape) == 3:
        depth_src = cv2.cvtColor(depth_src, cv2.COLOR_BGR2GRAY)

    depth = normalize_mat(depth_src)
    depth = np.round(depth * 255).astype(np.uint8)  # imwrite expects 8-bit data
    cv2.imwrite(os.path.join(depth_dir, "normalized_depth_" + filename + ".jpg"), depth)

def file_processing_im(depth_dir, depth_prefix):
    for f in os.listdir(depth_dir):
        filename = f.split(".")[0]
        generate_stereo(depth_dir, depth_prefix, filename)

def main():
    start_time = time.time()
    depth_dir = 'gdrive/MyDrive/depth/'
    depth_prefix = 'Depth_'
    file_processing_im(depth_dir, depth_prefix)
    print(time.time() - start_time, "seconds for base generation")

if __name__ == "__main__":
    main()

I don't think it makes much sense to convert this code to CUDA.
The only operation that could be optimized is the normalization step,
and uploading/downloading the data to and from the GPU would eat up any speedup.
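
Just to illustrate the point, assuming a cv2 build that actually has the cv2.cuda modules, the GPU path would look roughly like this (the input filename is made up):

import cv2

img = cv2.imread("Depth_example.jpg")          # hypothetical input file

gpu_src = cv2.cuda_GpuMat()
gpu_src.upload(img)                            # host -> device copy (overhead)

gpu_gray = cv2.cuda.cvtColor(gpu_src, cv2.COLOR_BGR2GRAY)

gray = gpu_gray.download()                     # device -> host copy (overhead)

# The per-pixel arithmetic here is trivial, so the two copies above typically
# cost more time than the work they are meant to accelerate.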

Also, if this should run on Colab, you'd have to build your own cv2 from source first;
the built-in version does not support CUDA.

However, you can use cv2.imread(..., cv2.IMREAD_GRAYSCALE) and skip the gray conversion entirely.
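
E.g., with the same paths as in your script:

depth_src = cv2.imread(os.path.join(depth_dir, depth_prefix + filename + ".jpg"),
                       cv2.IMREAD_GRAYSCALE)
# depth_src is already single-channel, so the BGR2GRAY branch can be dropped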

Is there any way to read all the images from disk in parallel and process them further?

Python multithreading/multiprocessing
(OpenCV does not expose those things at a "user level")
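
A rough sketch with concurrent.futures, reusing generate_stereo from your script. The name file_processing_im_parallel and the worker count of 4 are just placeholders; OpenCV's imread/imwrite run in C++ and release the GIL, so a thread pool can overlap the per-image work:

import os
from concurrent.futures import ThreadPoolExecutor

def file_processing_im_parallel(depth_dir, depth_prefix, workers=4):
    # Sketch only: process every file with a small thread pool
    # instead of a sequential loop.
    filenames = [f.split(".")[0] for f in os.listdir(depth_dir)]
    with ThreadPoolExecutor(max_workers=workers) as pool:
        list(pool.map(lambda name: generate_stereo(depth_dir, depth_prefix, name),
                      filenames))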

Thank you so much for your support
