How to compile an opencvjs file with SIFT algorithm from opencv

When I use cv.SIFT, I will get the error cv.SIFT is undefined. I think it is because the xfeatures2d interface is not provided in the official opencv.js file.

related post

no. SIFT was moved back to the (main) features2d module with 4.x, so you don't need xfeatures2d or any of the contrib modules.

IF you’re able to rebuild it from src, try to add SIFT to the whitelist, in the same way it’s done for AKAZE or ORB:

Thank you for your reply! Can you provide me with an opencv.js build that contains the SIFT algorithm and FlannBasedMatcher? I can't compile opencv.js through Emscripten myself, thank you very much.

no, sorry.

FlannBasedMatcher

you could try to make one from the DescriptorMatcher class, like:

m = new cv.DescriptorMatcher("FlannBased")

(yeah, weird — it needs `new` instead of the expected `create()`!)

also, if i may ask – what’s your ‘use-case’ for this ? which problem are you trying to solve ?

function matchOfSIFT(imgClone, tempClone, out) {
  // Locate `tempClone` (template) inside `imgClone` (scene) using SIFT
  // features, a FLANN-based matcher, Lowe's ratio test and a RANSAC
  // homography, then draw the matched quadrilateral on the scene image
  // and display it on the canvas `out`.
  //
  // @param {HTMLElement|string} imgClone  - scene image element (or its id)
  // @param {HTMLElement|string} tempClone - template image element (or its id)
  // @param {HTMLCanvasElement|string} out - output canvas (or its id)
  // @throws {Error} when fewer than 4 good matches survive the ratio test
  //                 (findHomography needs at least 4 correspondences)

  // Read the input (scene) image and the template image.
  const inputImage = cv.imread(imgClone);
  const templateImage = cv.imread(tempClone);

  // SIFT is a class in OpenCV.js — it must be constructed with `new`.
  // (The original `cv.SIFT()` call throws at runtime.)
  const sift = new cv.SIFT();

  // Detect keypoints and compute descriptors for both images.
  // A single empty Mat serves as the "no mask" argument for both calls,
  // so it can be freed once at the end (the original leaked two Mats here).
  const noMask = new cv.Mat();
  const keypointsInput = new cv.KeyPointVector();
  const descriptorsInput = new cv.Mat();
  sift.detectAndCompute(inputImage, noMask, keypointsInput, descriptorsInput);

  const keypointsTemplate = new cv.KeyPointVector();
  const descriptorsTemplate = new cv.Mat();
  sift.detectAndCompute(templateImage, noMask, keypointsTemplate, descriptorsTemplate);

  // FLANN matcher — exposed in OpenCV.js through DescriptorMatcher.
  const flann = new cv.DescriptorMatcher('FlannBased');

  // k-NN matching with k = 2 so the ratio test below can compare the
  // best match against the second-best.
  const matches = new cv.DMatchVectorVector();
  flann.knnMatch(descriptorsInput, descriptorsTemplate, matches, 2);

  // Lowe's ratio test: keep a match only when it is clearly better than
  // its runner-up. Guard against pairs with fewer than 2 neighbours,
  // which knnMatch can legitimately return.
  const goodMatches = [];
  for (let i = 0; i < matches.size(); i++) {
    const pair = matches.get(i);
    if (pair.size() < 2) continue;
    const m = pair.get(0);
    const n = pair.get(1);
    if (m.distance < 0.75 * n.distance) {
      goodMatches.push(m);
    }
  }

  // findHomography requires at least 4 point correspondences.
  if (goodMatches.length < 4) {
    // Free everything allocated so far before bailing out.
    [inputImage, templateImage, noMask, descriptorsInput, descriptorsTemplate]
      .forEach((m) => m.delete());
    keypointsInput.delete();
    keypointsTemplate.delete();
    matches.delete();
    sift.delete();
    flann.delete();
    throw new Error(
      `matchOfSIFT: only ${goodMatches.length} good matches; need at least 4`,
    );
  }

  // Collect matched coordinates as flat [x0, y0, x1, y1, ...] arrays —
  // cv.matFromArray takes (rows, cols, type, flatArray); the original
  // two-argument call with an array of point objects is invalid.
  const srcFlat = [];
  const dstFlat = [];
  for (const m of goodMatches) {
    const scenePt = keypointsInput.get(m.queryIdx).pt;
    const templPt = keypointsTemplate.get(m.trainIdx).pt;
    dstFlat.push(scenePt.x, scenePt.y);
    srcFlat.push(templPt.x, templPt.y);
  }
  const srcPoints = cv.matFromArray(goodMatches.length, 1, cv.CV_32FC2, srcFlat);
  const dstPoints = cv.matFromArray(goodMatches.length, 1, cv.CV_32FC2, dstFlat);

  // Estimate the template -> scene transform with RANSAC.
  // findHomography RETURNS the homography Mat; it does not fill an
  // output argument (the 5th parameter is the inlier mask).
  const ransacThresh = 5.0;
  const H = cv.findHomography(srcPoints, dstPoints, cv.RANSAC, ransacThresh);

  // Warp the template into the scene's frame (kept for parity with the
  // Python original; the warped image itself is not displayed).
  const outputImage = new cv.Mat();
  cv.warpPerspective(
    templateImage,
    outputImage,
    H,
    inputImage.size(),
    cv.INTER_LINEAR,
    cv.BORDER_CONSTANT,
    new cv.Scalar(),
  );

  // Project the template's four corners through the homography.
  const h = templateImage.rows;
  const w = templateImage.cols;
  const pts = cv.matFromArray(4, 1, cv.CV_32FC2,
    [0, 0, 0, h - 1, w - 1, h - 1, w - 1, 0]);
  const dst = new cv.Mat();
  cv.perspectiveTransform(pts, dst, H);

  // Draw the matched region as a closed green quadrilateral.
  const inputImageWithRect = inputImage.clone();
  const dstArray = dst.data32F;
  for (let i = 0; i < 4; i++) {
    const pt1 = new cv.Point(dstArray[i * 2], dstArray[i * 2 + 1]);
    const pt2 = new cv.Point(
      dstArray[((i + 1) % 4) * 2],
      dstArray[((i + 1) % 4) * 2 + 1],
    );
    cv.line(inputImageWithRect, pt1, pt2, new cv.Scalar(0, 255, 0, 255), 2, cv.LINE_AA, 0);
  }

  // Show the result.
  cv.imshow(out, inputImageWithRect);

  // Free every Emscripten-heap object — OpenCV.js Mats/vectors are not
  // garbage-collected and leak WASM memory unless delete()d explicitly.
  [inputImage, templateImage, noMask, descriptorsInput, descriptorsTemplate,
    srcPoints, dstPoints, H, outputImage, pts, dst, inputImageWithRect]
    .forEach((m) => m.delete());
  keypointsInput.delete();
  keypointsTemplate.delete();
  matches.delete();
  sift.delete();
  flann.delete();
}

that looks awesome, but does it work ?

no new or create() here ?

what exactly was needed to get SIFT in ?

I want to migrate this Python code to JavaScript, as you mentioned, it’s necessary to manually build OpenCV.js with the SIFT algorithm included for it to work properly.

import cv2
import numpy as np

# Load the scene image and the template to locate inside it.
# dog
# input_image = cv2.imread('./Assets/dog.jpg', cv2.IMREAD_COLOR)
# template_image = cv2.imread('./Assets/dog_temp.jpg', cv2.IMREAD_COLOR)
# text
input_image = cv2.imread('./Assets/eng_bw.png', cv2.IMREAD_COLOR)
template_image = cv2.imread('./Assets/eng_bw_temp.png', cv2.IMREAD_COLOR)

# SIFT keypoints + descriptors for both images.
sift = cv2.SIFT_create()
keypoints_input, descriptors_input = sift.detectAndCompute(input_image, None)
keypoints_template, descriptors_template = sift.detectAndCompute(
    template_image, None)

# FLANN-based matcher.
flann = cv2.FlannBasedMatcher({'algorithm': 0, 'trees': 5}, {})

# Two nearest neighbours per descriptor, for the ratio test below.
matches = flann.knnMatch(descriptors_input, descriptors_template, k=2)

# Lowe's ratio test: keep the best match only when it is clearly
# better than the second-best candidate.
good_matches = [m for m, n in matches if m.distance < 0.75 * n.distance]

# Pixel coordinates of the surviving correspondences.
matched_points_input = [keypoints_input[m.queryIdx].pt for m in good_matches]
matched_points_template = [
    keypoints_template[m.trainIdx].pt for m in good_matches]

# Robust homography (template -> scene) estimated with RANSAC.
ransac_thresh = 5.0
H, mask = cv2.findHomography(
    np.array(matched_points_template),
    np.array(matched_points_input),
    cv2.RANSAC, ransac_thresh)

# Warp the template into the scene image's frame.
output_image = cv2.warpPerspective(
    template_image, H, (input_image.shape[1], input_image.shape[0]))

# Project the template's four corners through the homography.
h, w = template_image.shape[:2]
pts = np.array([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]],
               dtype=float).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, H)

# Outline the detected region on a copy of the scene image.
input_image_with_rect = input_image.copy()
cv2.polylines(input_image_with_rect, [np.int32(dst)],
              isClosed=True, color=(0, 255, 0), thickness=2)

# Show the result until a key is pressed.
cv2.imshow('res Image', input_image_with_rect)
cv2.waitKey(0)
cv2.destroyAllWindows()