Hello, I am working on stitching panorama images in Python for my internship. My images are not stitching properly: the quality degrades as images are combined, and the images get stitched together in a random order. Camera orientation also plays an important role; if I capture the panorama in landscape it sometimes looks good, but portrait captures become a mess. I would really appreciate some help, as I want to learn.
My Python code is below. Can someone look it over and point out what is going wrong?
import cv2
import numpy as np
import os
import warnings

cv2.ocl.setUseOpenCL(False)  # disable OpenCL to avoid crashes on some systems
warnings.filterwarnings("ignore")

feature_extraction_algo = 'sift'
feature_to_match = 'knn'
# Load images from folder (sorted, so the stitching order is deterministic --
# os.listdir returns files in arbitrary order, which stitches images randomly)
def load_images_from_folder(folder):
    images = []
    for filename in sorted(os.listdir(folder)):
        img = cv2.imread(os.path.join(folder, filename))
        if img is not None:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            images.append(img)
    return images
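# Note: plain sorted() is lexicographic, so 'img_10.jpg' sorts before 'img_2.jpg'.
# If the filenames are numbered, a natural sort preserves capture order. A small
# sketch (my own addition, assuming names like 'img_<number>.jpg'):
import re

def natural_key(name):
    # Split the name into digit and non-digit runs so numbers compare numerically
    return [int(part) if part.isdigit() else part.lower()
            for part in re.split(r'(\d+)', name)]

# usage: for filename in sorted(os.listdir(folder), key=natural_key): ...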
# Initialize feature detector and compute keypoints plus descriptors
def select_descriptor_methods(image, method=None):
    assert method is not None, "Please define a feature descriptor method. Accepted values are: 'sift', 'surf', 'brisk', 'orb'"
    if method == 'sift':
        descriptor = cv2.SIFT_create()
    elif method == 'surf':
        # SURF is patented and lives in opencv-contrib; cv2.SURF_create() does not exist
        descriptor = cv2.xfeatures2d.SURF_create()
    elif method == 'brisk':
        descriptor = cv2.BRISK_create()
    elif method == 'orb':
        descriptor = cv2.ORB_create()
    else:
        raise ValueError(f"Unknown feature descriptor method: {method}")
    keypoints, features = descriptor.detectAndCompute(image, None)
    return keypoints, features
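# cv2.xfeatures2d is only present in opencv-contrib-python builds with the
# non-free modules enabled, so SURF can fail at runtime. A sketch of a guarded
# fallback to ORB (my own suggestion, not part of the original code):
def make_surf_or_orb():
    try:
        return cv2.xfeatures2d.SURF_create()
    except (AttributeError, cv2.error):
        print("SURF unavailable (needs opencv-contrib non-free); falling back to ORB")
        return cv2.ORB_create()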
# Create a brute-force matcher with the right norm for the descriptor type
def create_matching_object(method, crossCheck):
    if method in ['sift', 'surf']:
        # float descriptors -> L2 distance
        return cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    elif method in ['orb', 'brisk']:
        # binary descriptors -> Hamming distance
        return cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
# Keypoint matching: brute force with cross-check
def key_points_matching(features_train_img, features_query_img, method):
    bf = create_matching_object(method, crossCheck=True)
    best_matches = bf.match(features_train_img, features_query_img)
    return sorted(best_matches, key=lambda x: x.distance)

# Keypoint matching: KNN with Lowe's ratio test
def key_points_matching_KNN(features_train_img, features_query_img, ratio, method):
    bf = create_matching_object(method, crossCheck=False)
    rawMatches = bf.knnMatch(features_train_img, features_query_img, k=2)
    # knnMatch can return fewer than two neighbours per query, so guard the unpacking
    matches = [pair[0] for pair in rawMatches
               if len(pair) == 2 and pair[0].distance < pair[1].distance * ratio]
    return matches
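# The ratio is a precision/recall knob: a lower value (e.g. 0.6) keeps only very
# distinctive matches, a higher one (e.g. 0.8) keeps more but noisier matches.
# A quick way to compare (feat_a / feat_b are hypothetical descriptor arrays
# returned by select_descriptor_methods):
# for r in (0.6, 0.7, 0.8):
#     print(r, len(key_points_matching_KNN(feat_a, feat_b, ratio=r, method='sift')))
# Fewer but cleaner matches usually give a more stable homography.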
def homography_stitching(keypoints_train_img, keypoints_query_img, matches, reprojThresh):
    keypoints_train_img = np.float32([keypoint.pt for keypoint in keypoints_train_img])
    keypoints_query_img = np.float32([keypoint.pt for keypoint in keypoints_query_img])
    # A homography needs at least 4 point correspondences
    if len(matches) >= 4:
        points_train = np.float32([keypoints_train_img[m.queryIdx] for m in matches]).reshape(-1, 1, 2)
        points_query = np.float32([keypoints_query_img[m.trainIdx] for m in matches]).reshape(-1, 1, 2)
        H, status = cv2.findHomography(points_train, points_query, cv2.RANSAC, reprojThresh)
        if H is None:  # RANSAC can still fail to find a model
            return None
        return matches, H, status
    else:
        return None
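# The fixed-size canvas used below (panorama width + train width) only fits
# content that lands to the right of the current panorama. For portrait shots
# or other camera motions the warp can map to negative coordinates and get
# cropped, which is one likely cause of the "mess". A sketch of a safer warp
# that sizes the canvas from the warped corners (my own helper, not from the
# original code):
def warp_onto_canvas(panorama, train_photo, H):
    h1, w1 = panorama.shape[:2]
    h2, w2 = train_photo.shape[:2]
    # Warp the train image corners into panorama coordinates
    corners_train = np.float32([[0, 0], [w2, 0], [w2, h2], [0, h2]]).reshape(-1, 1, 2)
    corners_pano = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners_train, H)
    all_corners = np.concatenate((warped, corners_pano), axis=0)
    x_min, y_min = np.int32(all_corners.min(axis=0).ravel() - 0.5)
    x_max, y_max = np.int32(all_corners.max(axis=0).ravel() + 0.5)
    # Translate so nothing lands at a negative coordinate, then warp
    shift = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]], dtype=np.float64)
    result = cv2.warpPerspective(train_photo, shift @ H, (x_max - x_min, y_max - y_min))
    result[-y_min:h1 - y_min, -x_min:w1 - x_min] = panorama
    return result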
# Stitch images pairwise, growing the panorama one image at a time
def stitch_images(images, method, feature_to_match):
    panorama = images[0]
    panorama_gray = cv2.cvtColor(panorama, cv2.COLOR_RGB2GRAY)
    for i in range(1, len(images)):
        train_photo = images[i]
        train_photo_gray = cv2.cvtColor(train_photo, cv2.COLOR_RGB2GRAY)
        keypoints_train_img, features_train_img = select_descriptor_methods(train_photo_gray, method=method)
        keypoints_panorama_img, features_panorama_img = select_descriptor_methods(panorama_gray, method=method)
        # Debug output: dump the panorama keypoints
        print(f'keypoints in panorama image: {len(keypoints_panorama_img)}')
        for keypoint in keypoints_panorama_img:
            x, y = keypoint.pt
            size = keypoint.size
            orientation = keypoint.angle
            response = keypoint.response
            octave = keypoint.octave
            class_id = keypoint.class_id
            print(f"x:{x}, y:{y}, size:{size}, orientation:{orientation}, response:{response}, octave:{octave}, class_id:{class_id}")
        if feature_to_match == 'bf':
            matches = key_points_matching(features_train_img, features_panorama_img, method=method)
        elif feature_to_match == 'knn':
            matches = key_points_matching_KNN(features_train_img, features_panorama_img, ratio=0.70, method=method)
        # Draw at most 100 matches; np.random.choice raises if asked for more than exist
        sample = list(np.random.choice(matches, min(100, len(matches)), replace=False)) if matches else []
        mapped_features_image = cv2.drawMatches(train_photo, keypoints_train_img, panorama, keypoints_panorama_img,
                                                sample, None,
                                                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
        cv2.imshow(f'Mapped Features Image {i}', cv2.cvtColor(mapped_features_image, cv2.COLOR_RGB2BGR))
        cv2.waitKey(0)
        cv2.destroyWindow(f'Mapped Features Image {i}')
        M = homography_stitching(keypoints_train_img, keypoints_panorama_img, matches, reprojThresh=5)
        if M is None:
            print(f"Homography stitching failed for image {i}")
            continue
        matches, homography_matrix, status = M
        print(f"Homography matrix: {homography_matrix}")
        # NOTE: this fixed canvas only fits content to the right of the current
        # panorama; anything warped to negative coordinates is cropped (see the
        # warp_onto_canvas sketch above for a size-from-corners alternative)
        width = panorama.shape[1] + train_photo.shape[1]
        height = max(panorama.shape[0], train_photo.shape[0])
        result = cv2.warpPerspective(train_photo, homography_matrix, (width, height))
        result[0:panorama.shape[0], 0:panorama.shape[1]] = panorama
        panorama = result
        panorama_gray = cv2.cvtColor(panorama, cv2.COLOR_RGB2GRAY)
        cv2.imshow(f'Panorama {i}', cv2.cvtColor(panorama, cv2.COLOR_RGB2BGR))
        cv2.imwrite(f'./output/panorama_{i}.jpeg', cv2.cvtColor(panorama, cv2.COLOR_RGB2BGR))
        cv2.waitKey(0)
        cv2.destroyWindow(f'Panorama {i}')
    return panorama
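# For comparison, OpenCV's high-level stitcher handles ordering, seam finding
# and blending internally, which is a useful sanity check against the manual
# pipeline. A minimal sketch (my own addition, assuming OpenCV 4.x):
def stitch_with_opencv(images):
    # Stitcher expects BGR images, and our loader converted everything to RGB
    bgr_images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
    stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
    status, pano = stitcher.stitch(bgr_images)
    if status != cv2.Stitcher_OK:
        raise RuntimeError(f"Stitcher failed with status {status}")
    return cv2.cvtColor(pano, cv2.COLOR_BGR2RGB)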
# Load the images
images = load_images_from_folder(r'E:\PycharmProjects\FinalInternship\Calibrated_undistorted_images')
os.makedirs('./output', exist_ok=True)  # cv2.imwrite fails silently if the folder is missing
if len(images) < 2:
    print("Need at least two images to create a panorama")
else:
    result_panorama = stitch_images(images, method=feature_extraction_algo, feature_to_match=feature_to_match)
    cv2.imshow("Final Panorama", cv2.cvtColor(result_panorama, cv2.COLOR_RGB2BGR))
    # PNG is lossless; re-saving as JPEG recompresses and costs quality
    cv2.imwrite('./output/final_panorama.png', cv2.cvtColor(result_panorama, cv2.COLOR_RGB2BGR))
    cv2.waitKey(0)
    cv2.destroyAllWindows()