Panorama creation from video using stitcher

Hi,

I am trying to create a panorama from a video in which the camera translates linearly while capturing footage. I want to break the video into frames and then stitch the individual frames into one long panorama. However, the final image is blurry towards the left, which I think is because of the overlap quality. How do I go about getting a good stitch for around 30 to 40 images?
I have attached the output image.


welcome.

please present your source code. I suspect a fundamental problem in your approach but I need to see what you’re doing.

I would recommend posting code as text. use three backticks (a backtick is ` ) to start and end a section of code.
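
for example (the backticks are shown indented here so they display literally):

    ```
    your code here
    ```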

as I suspected, you repeatedly warp part of the picture. that will degrade the image into that blur you see.

have you followed the tutorial?

https://docs.opencv.org/master/d8/d19/tutorial_stitcher.html

This code is in C++, right? How do I use it in Python? Also, why does it warp repeatedly? How else would you suggest cumulatively building up the stitch without degrading the output on the left?

```python
import imutils
import cv2
import glob

input_path = "/Users/akshayacharya/Desktop/Panorama/Raw Data/riverside/*.jpg"

# input and sort the images
list_images = glob.glob(input_path)
list_sorted = sorted(list_images)
print(list_sorted)

# output path definition
output_path = "/Users/akshayacharya/Desktop/Panorama/Final Panorama/finalpanoex1.jpg"

# initialize empty list and fill all images
images = []
for image in list_sorted:
    image1 = cv2.imread(image)
    image1 = cv2.resize(image1, (720, 480))
    images.append(image1)

print("Read the images")
# this is the final list to stitch
final = [images[0]]
flag = True
print(len(images))
temp = [images[0]]
print(type(temp))

stitcher = cv2.createStitcher() if imutils.is_cv3() else cv2.Stitcher_create()

i = 0
while(i < len(images)-1):
    (status, stitched) = stitcher.stitch([temp[0], images[i+1]])
    if status == 0:
        final.append(images[i+1])
        print(f"Succesfully stitch {i} to {i+1}")
        i = i+1

        temp[0] = stitched

        continue
    if status != 0:
        print(f"Succesfully could not stitch {i} to {i + 1}")
        for j in range(i+2, len(images)):
            print(f"now trying {i} to {j}")
            (status, stitchedd) = stitcher.stitch([temp[0], images[j]])
            if status == 0:
                print(f"Succesfully managed to stitch {i} to {j}")
                final.append(images[j])
                i=j
                temp[0] = stitchedd
                break
            if status != 0:
                print(f"Oops could not stitch {i} to {j}")
                print(f"Will now see compatibility between {i} and {j+1}")

            continue

        i += 1
    continue
"""
print("Now stitching the final")
(status, stitches) = stitcher.stitch(final)
print(status)

# save it if succesfully stitched
if status == 0:
    # write the output stitched image to disk"""
cv2.imwrite(output_path, temp[0])
'''

you have to pass all pictures to the stitcher at once. it will handle this correctly.

if you need to alter the “pipeline”, you need to consult the documentation. your code suggests that you don’t need that. you only need the basic high-level stitching API, which is very simple to use. follow the tutorial. translate into python as appropriate.
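
in python it looks roughly like this (a minimal sketch; the input path is hypothetical):

```python
import cv2
import glob

# load the frames (path is hypothetical)
images = [cv2.imread(p) for p in sorted(glob.glob("frames/*.jpg"))]

# pass ALL frames to the stitcher in ONE call; it handles matching,
# camera estimation, warping, and seam blending internally
stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
status, pano = stitcher.stitch(images)

if status == cv2.Stitcher_OK:
    cv2.imwrite("pano.jpg", pano)
else:
    # e.g. cv2.Stitcher_ERR_NEED_MORE_IMGS or cv2.Stitcher_ERR_HOMOGRAPHY_EST_FAIL
    print("stitching failed, status:", status)
```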

When I pass all the images at once, it is not able to stitch them. I have changed my approach, but it still cannot stitch the entire image. Can you check this code?
Thanks.

```python
import cv2
import numpy as np
import glob
import imutils


# DEFINE THE HELPER FUNCTIONS

def draw_matches(img1, keypoints1, img2, keypoints2, matches):
    r, c = img1.shape[:2]
    r1, c1 = img2.shape[:2]

    # Create a blank image with the size of the first image + second image
    output_img = np.zeros((max([r, r1]), c + c1, 3), dtype='uint8')
    output_img[:r, :c, :] = np.dstack([img1])
    output_img[:r1, c:c + c1, :] = np.dstack([img2])

    # Go over all of the matching points and extract them
    for match in matches:
        img1_idx = match.queryIdx
        img2_idx = match.trainIdx
        (x1, y1) = keypoints1[img1_idx].pt
        (x2, y2) = keypoints2[img2_idx].pt

        # Draw circles on the keypoints
        cv2.circle(output_img, (int(x1), int(y1)), 4, (0, 255, 255), 1)
        cv2.circle(output_img, (int(x2) + c, int(y2)), 4, (0, 255, 255), 1)

        # Connect the same keypoints
        cv2.line(output_img, (int(x1), int(y1)), (int(x2) + c, int(y2)), (0, 255, 255), 1)

    return output_img


def warpImages(img1, img2, H):
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    list_of_points_1 = np.float32([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
    temp_points = np.float32([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)

    # When we have established a homography we need to warp perspective
    # Change field of view
    list_of_points_2 = cv2.perspectiveTransform(temp_points, H)

    list_of_points = np.concatenate((list_of_points_1, list_of_points_2), axis=0)

    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)

    translation_dist = [-x_min, -y_min]

    H_translation = np.array([[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])

    output_img = cv2.warpPerspective(img2, H_translation.dot(H), (x_max - x_min, y_max - y_min))
    output_img[translation_dist[1]:rows1 + translation_dist[1], translation_dist[0]:cols1 + translation_dist[0]] = img1
    # print(output_img)

    return output_img

def trim(frame):
    # crop top
    if not np.sum(frame[0]):
        return trim(frame[1:])
    # crop bottom
    if not np.sum(frame[-1]):
        return trim(frame[:-2])
    # crop left
    if not np.sum(frame[:, 0]):
        return trim(frame[:, 1:])
    # crop right
    if not np.sum(frame[:, -1]):
        return trim(frame[:, :-2])
    return frame


# End of function definitions

# Main program begins here

# Define input and output paths
input_path = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Test images for final/Trial3/*.jpg"
output_path = "Output/o5.jpg"

# Define whatever variables necessary

input_img = glob.glob(input_path)
img_path = sorted(input_img)
tmp = img_path[0]
flag = True
pano = []
j = 0

for i in range(1, len(img_path)):
    if flag:
        img1 = cv2.imread(tmp, cv2.COLOR_BGR2GRAY)
        img2 = cv2.imread(img_path[i], cv2.COLOR_BGR2GRAY)
        flag = False
    img1 = cv2.resize(img1, (1080, 720), fx=1, fy=1)
    img2 = cv2.imread(img_path[i], cv2.COLOR_BGR2GRAY)
    img2 = cv2.resize(img2, (1080, 720), fx=1, fy=1)

    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img2, None)

    # cv2.imshow('1',cv2.drawKeypoints(img1, keypoints1, None, (255, 0, 255)))
    # cv2.imshow('2',cv2.drawKeypoints(img2, keypoints2, None, (255,255, 255)))
    # cv2.waitKey(0)

    # Create a BFMatcher object.
    # It will find all of the matching keypoints on two images
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    # Find matching points
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    # print("Descriptor of the first keypoint: ")
    # print(descriptors1[0])
    # print(type(matches))

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    img3 = draw_matches(img1, keypoints1, img2, keypoints2, all_matches[:])
    # cv2.imshow('Matches', img3)
    # cv2.waitKey(0)

    # Finding the best matches
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    # cv2.imshow('Final1',cv2.drawKeypoints(img1, [keypoints1[m.queryIdx] for m in good], None, (255, 0, 255)))
    # cv2.imshow('Final2',cv2.drawKeypoints(img2, [keypoints2[m.queryIdx] for m in good], None, (255, 0, 255)))
    # cv2.waitKey(0)

    MIN_MATCH_COUNT = 10

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result = warpImages(img2, img1, M)
        img1 = result



    if i%10 == 0:
        stitched = img1
        stitched = cv2.copyMakeBorder(stitched, 10, 10, 10, 10,
                                      cv2.BORDER_CONSTANT, value=(0, 0, 0))
        # convert the stitched image to grayscale and threshold it
        # such that all pixels greater than zero are set to 255
        # (foreground) while all others remain 0 (background)
        gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
        # find all external contours in the threshold image then find
        # the *largest* contour which will be the contour/outline of
        # the stitched image
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)
        # allocate memory for the mask which will contain the
        # rectangular bounding box of the stitched image region
        mask = np.zeros(thresh.shape, dtype="uint8")
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)
        # create two copies of the mask: one to serve as our actual
        # minimum rectangular region and another to serve as a counter
        # for how many pixels need to be removed to form the minimum
        # rectangular region
        minRect = mask.copy()
        sub = mask.copy()
        # keep looping until there are no non-zero pixels left in the
        # subtracted image
        while cv2.countNonZero(sub) > 0:
            # erode the minimum rectangular mask and then subtract
            # the thresholded image from the minimum rectangular mask
            # so we can count if there are any non-zero pixels left
            minRect = cv2.erode(minRect, None)
            sub = cv2.subtract(minRect, thresh)
        # find contours in the minimum rectangular mask and then
        # extract the bounding box (x, y)-coordinates
        cnts = cv2.findContours(minRect.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)
        (x, y, w, h) = cv2.boundingRect(c)
        # use the bounding box coordinates to extract our final
        # stitched image
        stitched = stitched[y:y + h, x:x + w]
        # cv2.imwrite("cropped.jpg", stitched)
        # writeStatus = cv2.imwrite(output_path, stitched)
        # if writeStatus is True:
        #    print("image written")
        # else:
        #    print("problem")  # or raise exception, handle problem, etc.
        stitched = cv2.resize(stitched, (1080, 720))

        pano.append(stitched)
        try:
            img1 = cv2.imread(img_path[i+1])
        except:
            continue

stitcher = cv2.createStitcher() if imutils.is_cv3() else cv2.Stitcher_create()
(status, stitchedd) = stitcher.stitch(pano)
x = 0

final_image = cv2.hconcat(pano)
cv2.imshow("Final", final_image)
cv2.waitKey(0)
#cv2.imwrite(output_path, final_image)
```

I have stitched 10 images at a time, since when I try the entire set I get no output. Once I create these smaller panoramas of 10 frames each, I want to combine them to get the final image. But there seems to be an issue.


Can you help me find a better way to go about it?

There are a few things to change, though they won't fix everything.

Change this:

```python
output_img[:r, :c, :] = np.dstack([img1, img1, img1])
output_img[:r1, c:c + c1, :] = np.dstack([img2, img2, img2])
```

And remove the fourth parameter:

```python
M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)
```

Try and test it. Unfortunately, there are more changes needed.
Can you post the original images?
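
For reference, a sketch of the shapes those two lines assume (file names here are hypothetical; the point is that a single-channel image must be stacked to 3 channels before it fits a color canvas):

```python
import cv2
import numpy as np

img1 = cv2.imread("frame1.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical inputs
img2 = cv2.imread("frame2.jpg", cv2.IMREAD_GRAYSCALE)

r, c = img1.shape[:2]
r1, c1 = img2.shape[:2]

# np.dstack([img, img, img]) turns a (h, w) grayscale image into (h, w, 3),
# so the assignment into the 3-channel canvas matches shapes
output_img = np.zeros((max(r, r1), c + c1, 3), dtype="uint8")
output_img[:r, :c, :] = np.dstack([img1, img1, img1])
output_img[:r1, c:c + c1, :] = np.dstack([img2, img2, img2])
```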

Hi, there's some improvement with a new approach that I have tried. Can you go through my code? I'm creating a bunch of smaller panoramas. It's coming out quite decent, but I want to stitch those smaller panoramas to get my final one. Can you see where I'm going wrong?

```python
import cv2
import numpy as np
import glob
import imutils


# DEFINE THE HELPER FUNCTIONS

def draw_matches(img1, keypoints1, img2, keypoints2, matches):
    r, c = img1.shape[:2]
    r1, c1 = img2.shape[:2]

    # Create a blank image with the size of the first image + second image
    output_img = np.zeros((max([r, r1]), c + c1, 3), dtype='uint8')
    output_img[:r, :c, :] = np.dstack([img1])
    output_img[:r1, c:c + c1, :] = np.dstack([img2])

    # Go over all of the matching points and extract them
    for match in matches:
        img1_idx = match.queryIdx
        img2_idx = match.trainIdx
        (x1, y1) = keypoints1[img1_idx].pt
        (x2, y2) = keypoints2[img2_idx].pt

        # Draw circles on the keypoints
        cv2.circle(output_img, (int(x1), int(y1)), 4, (0, 255, 255), 1)
        cv2.circle(output_img, (int(x2) + c, int(y2)), 4, (0, 255, 255), 1)

        # Connect the same keypoints
        cv2.line(output_img, (int(x1), int(y1)), (int(x2) + c, int(y2)), (0, 255, 255), 1)

    return output_img


def warpImages(img1, img2, H):
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    list_of_points_1 = np.float32([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
    temp_points = np.float32([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)

    # When we have established a homography we need to warp perspective
    # Change field of view
    list_of_points_2 = cv2.perspectiveTransform(temp_points, H)

    list_of_points = np.concatenate((list_of_points_1, list_of_points_2), axis=0)

    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)

    translation_dist = [-x_min, -y_min]

    H_translation = np.array([[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])

    output_img = cv2.warpPerspective(img2, H_translation.dot(H), (x_max - x_min, y_max - y_min))
    output_img[translation_dist[1]:rows1 + translation_dist[1], translation_dist[0]:cols1 + translation_dist[0]] = img1
    # print(output_img)

    return output_img

def trim(frame):
    # crop top
    if not np.sum(frame[0]):
        return trim(frame[1:])
    # crop bottom
    if not np.sum(frame[-1]):
        return trim(frame[:-2])
    # crop left
    if not np.sum(frame[:, 0]):
        return trim(frame[:, 1:])
    # crop right
    if not np.sum(frame[:, -1]):
        return trim(frame[:, :-2])
    return frame


# End of function definitions

# Main program begins here

# Define input and output paths
input_path = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Test images for final/Highfps2fps/*.jpg"
output_path = "Output/o5.jpg"

# Define whatever variables necessary

input_img = glob.glob(input_path)
img_path = sorted(input_img)
for i in range(0,len(img_path)):
    img = cv2.imread(img_path[i])
    img = cv2.resize(img,(400,400))
    cv2.imwrite(img_path[i],img)
tmp = img_path[0]
flag = True
pano = []
j = 1

for i in range(1, len(img_path)):
    if flag:
        img1 = cv2.imread(tmp, cv2.COLOR_BGR2GRAY)
        img2 = cv2.imread(img_path[i], cv2.COLOR_BGR2GRAY)
        flag = False
    img1 = cv2.resize(img1, (0,0), fx=1, fy=1)
    img2 = cv2.imread(img_path[i], cv2.COLOR_BGR2GRAY)
    img2 = cv2.resize(img2, (0,0), fx=1, fy=1)

    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img2, None)

    # cv2.imshow('1',cv2.drawKeypoints(img1, keypoints1, None, (255, 0, 255)))
    # cv2.imshow('2',cv2.drawKeypoints(img2, keypoints2, None, (255,255, 255)))
    # cv2.waitKey(0)

    # Create a BFMatcher object.
    # It will find all of the matching keypoints on two images
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    # Find matching points
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    # print("Descriptor of the first keypoint: ")
    # print(descriptors1[0])
    # print(type(matches))

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    img3 = draw_matches(img1, keypoints1, img2, keypoints2, all_matches[:])
    # cv2.imshow('Matches', img3)
    # cv2.waitKey(0)

    # Finding the best matches
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    # cv2.imshow('Final1',cv2.drawKeypoints(img1, [keypoints1[m.queryIdx] for m in good], None, (255, 0, 255)))
    # cv2.imshow('Final2',cv2.drawKeypoints(img2, [keypoints2[m.queryIdx] for m in good], None, (255, 0, 255)))
    # cv2.waitKey(0)

    MIN_MATCH_COUNT = 15

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result = warpImages(img2, img1, M)
        img1 = result



    if i%5 == 0:
        stitched = img1
        print(np.shape(stitched))
        stitched = cv2.copyMakeBorder(stitched, 10, 10, 10, 10,
                                      cv2.BORDER_CONSTANT, value=(0, 0, 0))
        # convert the stitched image to grayscale and threshold it
        # such that all pixels greater than zero are set to 255
        # (foreground) while all others remain 0 (background)
        gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
        # find all external contours in the threshold image then find
        # the *largest* contour which will be the contour/outline of
        # the stitched image
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)
        # allocate memory for the mask which will contain the
        # rectangular bounding box of the stitched image region
        mask = np.zeros(thresh.shape, dtype="uint8")
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)
        # create two copies of the mask: one to serve as our actual
        # minimum rectangular region and another to serve as a counter
        # for how many pixels need to be removed to form the minimum
        # rectangular region
        minRect = mask.copy()
        sub = mask.copy()
        # keep looping until there are no non-zero pixels left in the
        # subtracted image
        while cv2.countNonZero(sub) > 0:
            # erode the minimum rectangular mask and then subtract
            # the thresholded image from the minimum rectangular mask
            # so we can count if there are any non-zero pixels left
            minRect = cv2.erode(minRect, None)
            sub = cv2.subtract(minRect, thresh)
        # find contours in the minimum rectangular mask and then
        # extract the bounding box (x, y)-coordinates
        cnts = cv2.findContours(minRect.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)
        (x, y, w, h) = cv2.boundingRect(c)
        # use the bounding box coordinates to extract our final
        # stitched image
        stitched = stitched[y:y + h, x:x + w]
        # cv2.imwrite("cropped.jpg", stitched)
        # writeStatus = cv2.imwrite(output_path, stitched)
        # if writeStatus is True:
        #    print("image written")
        # else:
        #    print("problem")  # or raise exception, handle problem, etc.
        #stitched = cv2.resize(stitched, (1080, 720))

        pano.append(stitched)
        #stitched = cv2.resize(stitched,(np.shape(stitched)[0],1500))
        #titched1 = trim(stitched)
        cv2.imshow("Stitch", stitched)
        cv2.waitKey(0)
        cv2.imwrite(f"Test images for final/Highfps2fps/temp_pano/frame{j}.jpg", stitched)
        j += 1
        try:
            img1 = cv2.imread(img_path[i+1])
            i= i+1
            img1 = cv2.resize(img1, (400,400))

            print(np.shape(img1))
        except:
            continue

#cv2.imshow("Stitch", stitched)
#cv2.waitKey(0)

final_input = "/Users/akshayacharya/Desktop/Panorama/Bazinga/Test images for final/Highfps2fps/temp_pano/*.jpg"

input_image = glob.glob(final_input)
image_path = sorted(input_image)
for i in range(0,len(image_path)):
    img = cv2.imread(image_path[i])
    img = cv2.resize(img,(1080,720))
    cv2.imwrite(image_path[i],img)

temp = image_path[0]
flag = True

for i in range(1, len(image_path)):
    if flag:
        img1 = cv2.imread(temp, cv2.COLOR_BGR2GRAY)
        img2 = cv2.imread(image_path[i], cv2.COLOR_BGR2GRAY)
        flag = False
    img1 = cv2.resize(img1, (0,0), fx=1, fy=1)
    img2 = cv2.imread(image_path[i], cv2.COLOR_BGR2GRAY)
    img2 = cv2.resize(img2, (0,0), fx=1, fy=1)

    orb = cv2.ORB_create(nfeatures=2000)

    keypoints1, descriptors1 = orb.detectAndCompute(img1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(img2, None)

    # cv2.imshow('1',cv2.drawKeypoints(img1, keypoints1, None, (255, 0, 255)))
    # cv2.imshow('2',cv2.drawKeypoints(img2, keypoints2, None, (255,255, 255)))
    # cv2.waitKey(0)

    # Create a BFMatcher object.
    # It will find all of the matching keypoints on two images
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    # Find matching points
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)

    # print("Descriptor of the first keypoint: ")
    # print(descriptors1[0])
    # print(type(matches))

    all_matches = []
    for m, n in matches:
        all_matches.append(m)

    img3 = draw_matches(img1, keypoints1, img2, keypoints2, all_matches[:])
    # cv2.imshow('Matches', img3)
    # cv2.waitKey(0)

    # Finding the best matches
    good = []
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            good.append(m)

    # cv2.imshow('Final1',cv2.drawKeypoints(img1, [keypoints1[m.queryIdx] for m in good], None, (255, 0, 255)))
    # cv2.imshow('Final2',cv2.drawKeypoints(img2, [keypoints2[m.queryIdx] for m in good], None, (255, 0, 255)))
    # cv2.waitKey(0)

    MIN_MATCH_COUNT = 10

    if len(good) > MIN_MATCH_COUNT:
        # Convert keypoints to an argument for findHomography
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Establish a homography
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        result = warpImages(img2, img1, M)
        img1 = result

cv2.imshow("Comeon", img1)
cv2.waitKey(0)
```

I can't attach more than one image as I am a new user; I will upload panorama 2 and the final panorama separately.



This is panorama 2


As you can see, a few objects are missing in between during overlap. Why is this happening?

Did you ever come up with a solution to the stitching issue? I am having the same issue.

the cause: recursively using warpPerspective. that code makes a copy of a copy of a copy, so the degradation accumulates. don’t blame the API, blame the user code and the bad tutorials these people follow.

you can use the warpPerspective API without such recursion. you just have to think about it a little.
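
for example, along these lines (a sketch only: it assumes you already computed pairwise homographies `pairwise_H[i]` mapping frame i+1 into frame i, e.g. from ORB matches and findHomography, and it pastes without any seam blending):

```python
import cv2
import numpy as np

def stitch_once(frames, pairwise_H):
    # compose: H_to_first[i] maps frames[i] into frame 0's coordinates,
    # so every ORIGINAL frame is warped exactly once -- no copies of copies
    H_to_first = [np.eye(3)]
    for H in pairwise_H:
        H_to_first.append(H_to_first[-1] @ H)

    # project every frame's corners to find the joint canvas extent
    corners = []
    for img, H in zip(frames, H_to_first):
        h, w = img.shape[:2]
        pts = np.float32([[0, 0], [0, h], [w, h], [w, 0]]).reshape(-1, 1, 2)
        corners.append(cv2.perspectiveTransform(pts, H))
    all_pts = np.concatenate(corners)
    x_min, y_min = np.int32(all_pts.min(axis=0).ravel() - 0.5)
    x_max, y_max = np.int32(all_pts.max(axis=0).ravel() + 0.5)

    # translate everything into positive canvas coordinates
    T = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]], dtype=np.float64)
    size = (int(x_max - x_min), int(y_max - y_min))

    canvas = np.zeros((size[1], size[0], 3), dtype=np.uint8)
    for img, H in zip(frames, H_to_first):
        warped = cv2.warpPerspective(img, T @ H, size)
        mask = warped.any(axis=2)
        canvas[mask] = warped[mask]  # naive paste, no blending
    return canvas
```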

using that API for stitching is a bad idea though. terrible. it’s not for stitching. nobody in their right mind uses it for stitching.

you should use the stitching module in OpenCV. it does things properly.
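
for a camera that translates (a scan, not a rotation about one point), the stitcher’s SCANS mode is the better fit. a sketch (the input path is hypothetical):

```python
import cv2
import glob

frames = [cv2.imread(p) for p in sorted(glob.glob("frames/*.jpg"))]

# SCANS mode uses an affine model, suited to a linearly translating camera
stitcher = cv2.Stitcher_create(cv2.Stitcher_SCANS)
status, scan = stitcher.stitch(frames)
if status == cv2.Stitcher_OK:
    cv2.imwrite("scan.jpg", scan)
```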