#!/usr/bin/env python3
import numpy as np
import time
import sys
import rospy
import matplotlib.pyplot as plt
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
from sklearn.preprocessing import normalize
bridge_object = CvBridge() # create the cv_bridge object
image_received = 0 #Flag to indicate that we have already received an image
cv_image1 = 0 #This is just to create the global variable cv_image
cv_image2 = 0 #This is just to create the global variable cv_image
window_size = 3
x_left = 0
y_left = 0
def show_image():
left_image_sub = rospy.Subscriber("/wamv/sensors/cameras/front_left_camera/image_raw",Image,camera_left_callback)
right_image_sub = rospy.Subscriber("/wamv/sensors/cameras/front_right_camera/image_raw",Image,camera_right_callback)
r = rospy.Rate(10) #10Hz
plt.figure()
global x_left
global y_left
while not rospy.is_shutdown():
if image_received:
# resize image 800x400
cv_image1_resize = cv2.resize(cv_image1,(800,400))
cv_image2_resize = cv2.resize(cv_image2,(800,400))
img1 = cv2.cvtColor(cv_image1_resize, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(cv_image1_resize, cv2.COLOR_BGR2GRAY)
cv2.imshow('Image from robot left camera', img1)
cv2.imshow('Image from robot right camera', img2)
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(img1,img2)
#disparity = stereo.compute(cv_image1_resize,cv_image2_resize)
cv2.imshow('disparity', disparity)
plt.imshow(disparity,'gray')
plt.show()
cv2.waitKey(1)
r.sleep()
cv2.destroyAllWindows()
def camera_left_callback(data):
global bridge_object
global cv_image1
global image_received
image_received=1
try:
# We select bgr8 because its the OpenCV encoding by default
cv_image1 = bridge_object.imgmsg_to_cv2(data,"bgr8")
except CvBridgeError as e:
print(e)
def camera_right_callback(data):
global bridge_object
global cv_image2
global image_received
image_received=1
try:
# We select bgr8 because its the OpenCV encoding by default
cv_image2 = bridge_object.imgmsg_to_cv2(data,"bgr8")
except CvBridgeError as e:
print(e)
if __name__ == '__main__':
rospy.init_node('depth_test', anonymous=True)
show_image()
I’m trying to create a depth map from real-time video, and as you can see, a gray screen appears when cv2.imshow is used, and a somewhat visible phenomenon appears when plt.imshow is used. I would like to know the cause of why only a gray screen keeps appearing when I use cv2.imshow. I thought it was camera calibration, but as far as I know, the camera on the gazebo doesn’t have to be calibrated separately.
In the case of the source code, I cloned the package in the following link to Github to create a node and launch file, catkin_make and then run it!