cv2.VideoCapture(0) doesn't work on Android

I'm trying to create an AR project with VR using an Android phone as the screen. I'm using Python 3 and Kivy to export to Android, and cv2.VideoCapture(0) to get video on the phone, but that doesn't work — all I see is a blank screen. I printed the output from the capture and it came up as None. I tried updating OpenCV to 4.5.2 and setting android.ndk to 25b in buildozer.spec. I also tried a newer Android phone and added the camera permission to my spec file. This is my script:

import cv2
import numpy as np
from cv2 import aruco
from kivy.app import App
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image as KivyImage

from android.permissions import request_permissions, Permission

# ArUco dictionary and detector parameters used by CameraApp.update().
# NOTE(review): DetectorParameters_create() exists in OpenCV <= 4.6; on 4.7+
# it was renamed to aruco.DetectorParameters() — confirm against the
# OpenCV version buildozer actually bundles.
marker_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
param_markers = aruco.DetectorParameters_create()

# Overlay image warped onto detected markers, loaded once at import time.
# Bug fix: the original used curly quotes around the filename (a SyntaxError).
rick = cv2.imread("rick-astley.jpg")
if rick is None:
    # imread returns None instead of raising; fail loudly here rather than
    # crashing later inside cv2.rotate with an opaque error.
    raise FileNotFoundError("rick-astley.jpg not found or unreadable")
rick = cv2.rotate(rick, cv2.ROTATE_180)
# Convert BGR (OpenCV default) to RGB so it matches the Kivy texture format.
rick = cv2.cvtColor(rick, cv2.COLOR_BGR2RGB)


class CameraApp(App):
    """Kivy app that shows a live camera feed with an ArUco-marker AR overlay,
    duplicated side by side for a phone VR viewer."""

    def build(self):
        """Build the UI, request the camera permission, open the capture,
        and schedule per-frame updates.

        Returns the root BoxLayout widget.
        """
        # Bug fix: on Android the CAMERA permission must also be requested at
        # runtime — declaring it in buildozer.spec alone is not enough, and
        # without the grant VideoCapture silently yields no frames.
        request_permissions([Permission.CAMERA])

        # Bug fix: curly quotes around 'vertical' were a SyntaxError.
        layout = BoxLayout(orientation='vertical')

        self.image = KivyImage()
        # Bug fix: the Image widget was never added to the layout, so even a
        # working capture produced a blank screen.
        layout.add_widget(self.image)

        # Open the camera via OpenCV's Android backend.
        # NOTE(review): CAP_ANDROID requires an OpenCV build with the Android
        # camera backend compiled in; the stock python-for-android opencv
        # recipe may not include it — verify, or consider Kivy's Camera
        # widget as a fallback if read() keeps returning None.
        self.capture = cv2.VideoCapture(-1, cv2.CAP_ANDROID)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160)

        # Poll the camera at ~30 fps.
        Clock.schedule_interval(self.update, 1.0 / 30.0)

        return layout

    def on_stop(self):
        """Release the camera when the app exits (fixes a resource leak)."""
        if getattr(self, 'capture', None) is not None:
            self.capture.release()

    def augmentation(self, bbox, img, img_augment):
        """Warp img_augment onto the quadrilateral marker region of img.

        bbox is one detected marker's corner array as returned by
        aruco.detectMarkers (corners ordered TL, TR, BR, BL in bbox[0]).
        The marker area in img is blacked out, then the warped overlay is
        added on top. Returns the composited frame.
        """
        top_left = bbox[0][0][0], bbox[0][0][1]
        top_right = bbox[0][1][0], bbox[0][1][1]
        bottom_right = bbox[0][2][0], bbox[0][2][1]
        bottom_left = bbox[0][3][0], bbox[0][3][1]

        height, width, _ = img_augment.shape

        points_1 = np.array([top_left, top_right, bottom_right, bottom_left])
        points_2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]])

        # Homography mapping the overlay's corners onto the marker's corners.
        matrix, _ = cv2.findHomography(points_2, points_1)
        image_out = cv2.warpPerspective(
            img_augment, matrix, (img.shape[1], img.shape[0]))
        # Zero out the marker region so the addition below doesn't blend
        # the camera pixels with the overlay.
        cv2.fillConvexPoly(img, points_1.astype(int), (0, 0, 0))
        image_out = img + image_out

        return image_out

    def update(self, dt):
        """Grab one frame, run marker detection/augmentation, and push the
        side-by-side VR composite to the Image widget. Called by Clock."""
        # Bug fix: the right-hand side of this assignment was missing.
        ret, frame = self.capture.read()
        print(ret, frame)

        # Bug fix: `if frame:` raises ValueError on a numpy array (ambiguous
        # truth value); test the success flag instead.
        if not ret or frame is None:
            return

        processed_frame = cv2.rotate(frame, cv2.ROTATE_180)
        processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
        marker_corners, marker_IDS, reject = aruco.detectMarkers(
            processed_frame, marker_dict, parameters=param_markers)

        if marker_IDS is not None:
            # Only the first detected marker is augmented.
            processed_frame = self.augmentation(
                np.array(marker_corners)[0], processed_frame, rick)

        # Split the frame into two overlapping halves (left/right eye views)
        # and place them side by side for the VR viewer.
        frame_width = processed_frame.shape[1]
        right_edge = round(frame_width / 1.5)
        offset = right_edge - frame_width // 2
        frame_r = processed_frame[:, :right_edge]
        frame_l = processed_frame[:, offset:offset + frame_r.shape[1]]

        vr_frame = np.concatenate((frame_r, frame_l), axis=1)

        self.image.texture = self.get_texture(vr_frame)

    def get_texture(self, frame):
        """Convert an RGB uint8 frame (H, W, 3) to a Kivy Texture."""
        texture = Texture.create(
            size=(frame.shape[1], frame.shape[0]), colorfmt='rgb')
        texture.blit_buffer(frame.tobytes(), colorfmt='rgb', bufferfmt='ubyte')
        # Bug fix: this return was mis-indented in the original paste.
        return texture

if __name__ == '__main__':
    # Bug fix: the dunder underscores were stripped by the paste
    # (`if name == 'main':`) and the app was never actually started.
    CameraApp().run()