Trying to do image alignment with ORB + FLANN matcher

I've been trying to do some image alignment based on a Stack Overflow post I found (which I can't link to, since I'm only allowed to post two links), and it's not producing anything useful.

First, here’s my code:

import sys
import cv2
import numpy as np

def removeOverlap(refBW, newBW):
  # invert each
  refBW = 255 - refBW
  newBW = 255 - newBW

  # absolute difference (equivalent to XOR for 0/255 binary images)
  xor = cv2.absdiff(refBW, newBW)

  # keep only the parts of the new image that aren't in the reference
  result = cv2.bitwise_and(xor, newBW)

  # invert
  result = 255 - result

  return result

def offset(img, xOffset, yOffset):
  # The number of pixels
  num_rows, num_cols = img.shape[:2]

  # Creating a translation matrix
  translation_matrix = np.float32([ [1,0,xOffset], [0,1,yOffset] ])

  # Image translation
  img_translation = cv2.warpAffine(img, translation_matrix, (num_cols,num_rows), borderValue = (255,255,255))

  return img_translation

imRef = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)
im = cv2.imread('new.png', cv2.IMREAD_GRAYSCALE)
 
imNew, h = alignImages(im, imRef)
 
cv2.imwrite('output.png', imNew)
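(The FLANN-based alignImages that the script above calls didn't make it into the post. For reference, since ORB produces binary descriptors, a FLANN matcher for it is normally built with an LSH index rather than the default KD-tree; the sketch below shows that usual setup and is only an assumption about what such code looks like, not my exact code.)

# --- sketch: ORB + FLANN via an LSH index (generic setup, not my exact code) ---
import cv2
import numpy as np

im1 = cv2.imread('new.png', cv2.IMREAD_GRAYSCALE)
im2 = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create(500)
keypoints1, descriptors1 = orb.detectAndCompute(im1, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2, None)

# ORB descriptors are binary, so FLANN needs the LSH index;
# the default KD-tree index assumes float descriptors (SIFT/SURF).
FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6,
                    key_size=12, multi_probe_level=1)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)

# knnMatch can return fewer than 2 neighbours per query with LSH,
# so guard before applying Lowe's ratio test.
pairs = flann.knnMatch(descriptors1, descriptors2, k=2)
good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]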

If I run that on https://www.terrafrost.com/ref.png and https://www.terrafrost.com/new.png I get this:

If I dispense with FLANN, however, and replace it with BRUTEFORCE_HAMMING, the result I get is waaaaay better. I'd post that image too, but alas I'm only able to attach one image…

Regardless, here’s my code with BRUTEFORCE_HAMMING:

import sys
import cv2
import numpy as np
  
GOOD_MATCH_PERCENT = 0.15
 
def alignImages(im1, im2):
  # Images are loaded with IMREAD_GRAYSCALE, so no BGR -> GRAY conversion is needed here
 
  # Detect ORB features and compute descriptors.
  orb = cv2.ORB_create(500)
  keypoints1, descriptors1 = orb.detectAndCompute(im1, None)
  keypoints2, descriptors2 = orb.detectAndCompute(im2, None)
 
  # Match features.
  matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
  matches = matcher.match(descriptors1, descriptors2, None)
 
  # Sort matches by score (use sorted(), since newer OpenCV versions return matches as a tuple)
  matches = sorted(matches, key=lambda x: x.distance)
 
  # Remove not so good matches
  numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
  print(matches[numGoodMatches].distance)
  matches = matches[:numGoodMatches]
 
  # Draw top matches
  #imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
  #cv2.imwrite("matches.jpg", imMatches)
 
  # Extract location of good matches
  points1 = np.zeros((len(matches), 2), dtype=np.float32)
  points2 = np.zeros((len(matches), 2), dtype=np.float32)
 
  for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt
    points2[i, :] = keypoints2[match.trainIdx].pt
 
  # Find homography
  h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
 
  # Use homography
  height, width = im2.shape
  im1Reg = cv2.warpPerspective(im1, h, (width, height))
 
  return im1Reg, h
 
imRef = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)
im = cv2.imread('new.png', cv2.IMREAD_GRAYSCALE)
 
imNew, h = alignImages(im, imRef)
 
cv2.imwrite('output.png', imNew)

Am I using the FLANN matcher incorrectly or is FLANN just not well suited for this kind of thing?

That's some unusual code.

I won't bother debugging anything that uses ORB; that's a lost cause. That type of "distance test" is also not a good idea.

Use the samples/python/find_obj.py example. Don't use ORB; use SIFT or AKAZE. Use FLANN, not bruteforce. Make sure Lowe's ratio test happens, and make sure findHomography happens.
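
A rough sketch of what that pipeline looks like (assuming OpenCV >= 4.4, where SIFT_create is in the main module; the 0.75 ratio threshold and the FLANN parameters are just the usual values from the sample, not tuned for these images):

import cv2
import numpy as np

def align_images(im1, im2):
  # SIFT gives float descriptors, so FLANN's KD-tree index applies
  sift = cv2.SIFT_create()
  kp1, des1 = sift.detectAndCompute(im1, None)
  kp2, des2 = sift.detectAndCompute(im2, None)

  # FLANN matcher, set up as in samples/python/find_obj.py
  FLANN_INDEX_KDTREE = 1
  flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
                                dict(checks=50))

  # Lowe's ratio test: keep a match only if it is clearly better
  # than the second-best candidate
  good = []
  for pair in flann.knnMatch(des1, des2, k=2):
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
      good.append(pair[0])

  # Homography from the surviving matches; RANSAC rejects outliers
  pts1 = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
  pts2 = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
  H, mask = cv2.findHomography(pts1, pts2, cv2.RANSAC, 5.0)

  h, w = im2.shape[:2]
  return cv2.warpPerspective(im1, H, (w, h)), H

imRef = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)
im = cv2.imread('new.png', cv2.IMREAD_GRAYSCALE)
aligned, H = align_images(im, imRef)
cv2.imwrite('output.png', aligned)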