# Real-time object detection: locate a reference (database) image inside the
# live camera feed using SIFT feature matching (FLANN/KNN) with OpenCV.
import cv2
import numpy as np
import os

# Minimum number of Lowe-ratio-filtered matches required to declare the
# reference object present in the current frame.
MIN_MATCH_COUNT = 30

# SIFT moved from the xfeatures2d contrib module into the core API in
# OpenCV 4.4; support both layouts.
try:
    detector = cv2.SIFT_create()
except AttributeError:
    detector = cv2.xfeatures2d.SIFT_create()

# FLANN with a KD-tree index is the standard matcher for SIFT's float
# descriptors.  NOTE: the parameter name is "trees" -- the original
# "tree=5" was silently ignored, leaving FLANN at its defaults.
FLANN_INDEX_KDTREE = 0
flannParam = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
flann = cv2.FlannBasedMatcher(flannParam, {})

# Reference (train) image, loaded as grayscale (flag 0).  imread returns
# None on failure instead of raising, so fail fast with a clear message.
trainImg = cv2.imread("crop image0.jpeg", 0)
if trainImg is None:
    raise FileNotFoundError("Could not read train image 'crop image0.jpeg'")
trainKP, trainDesc = detector.detectAndCompute(trainImg, None)
# --- Main loop: grab frames and match the reference image inside an ROI ---
cam = cv2.VideoCapture(0)
while True:
    ret, QueryImgBGR = cam.read()
    if not ret or QueryImgBGR is None:
        # Camera disconnected or stream ended; .shape on None would crash.
        break

    height, width = QueryImgBGR.shape[:2]

    # ROI box: middle third horizontally, middle half vertically.  Note
    # top_left_y > bottom_right_y (image y grows downward), which is why
    # the crop below slices bottom_right_y:top_left_y.
    top_left_x = int(width / 3)
    top_left_y = int((height / 2) + (height / 4))
    bottom_right_x = int((width / 3) * 2)
    bottom_right_y = int((height / 2) - (height / 4))

    cv2.rectangle(QueryImgBGR, (top_left_x, top_left_y),
                  (bottom_right_x, bottom_right_y), 255, 3)

    # `cropped` is a NumPy view into QueryImgBGR, so drawing on it also
    # marks the displayed frame.
    cropped = QueryImgBGR[bottom_right_y:top_left_y, top_left_x:bottom_right_x]
    QueryImg = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
    queryKP, queryDesc = detector.detectAndCompute(QueryImg, None)

    # Lowe's ratio test.  Guard against frames with no features
    # (queryDesc is None) and against knnMatch returning fewer than
    # k=2 neighbors per query descriptor -- both crash the original.
    goodMatch = []
    if queryDesc is not None and trainDesc is not None and len(trainDesc) >= 2:
        matches = flann.knnMatch(queryDesc, trainDesc, k=2)
        for pair in matches:
            if len(pair) < 2:
                continue
            m, n = pair
            if m.distance < 0.75 * n.distance:
                goodMatch.append(m)

    H = None
    if len(goodMatch) > MIN_MATCH_COUNT:
        tp = [trainKP[m.trainIdx].pt for m in goodMatch]
        qp = [queryKP[m.queryIdx].pt for m in goodMatch]
        tp, qp = np.float32((tp, qp))
        # RANSAC can still fail and return H = None on degenerate point
        # configurations, so the draw path checks H, not the match count.
        H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)

    if H is not None:
        # Project the train image's border into the query crop and draw it.
        h, w = trainImg.shape
        trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
        queryBorder = cv2.perspectiveTransform(trainBorder, H)
        cv2.rectangle(QueryImgBGR, (top_left_x, top_left_y),
                      (bottom_right_x, bottom_right_y), (0, 255, 0), 3)
        cv2.polylines(cropped, [np.int32(queryBorder)], True, (0, 0, 255), 3)
        cv2.putText(QueryImgBGR, 'Object Found', (50, 50),
                    cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 2)
    else:
        # Draw on the full frame: the ROI coordinates are in full-frame
        # space (the original drew on `cropped`, placing the rectangle
        # outside the visible crop area).
        cv2.rectangle(QueryImgBGR, (top_left_x, top_left_y),
                      (bottom_right_x, bottom_right_y), (255, 0, 0), 3)
        cv2.putText(QueryImgBGR, 'Obj not Found', (50, 50),
                    cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
        print("Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT))

    cv2.imshow('result', QueryImgBGR)
    if cv2.waitKey(10) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()