Why are the ORB features of frames read in a loop different from those extracted individually?

I have a video file A.mp4 and an image B.png.

I use OpenCV to loop over every frame of A.mp4, extracting ORB features from each frame, and then compare those features against the features of B.png. Besides the frame that should match (for example, frame 23323), a large number of other frames also match. However, when I save those extra frames as PNG files and run the same matching algorithm on them individually, they no longer match.

Why does this happen?

Here is my code (the loop script; it is saved as test2.py, which the second script below imports):

import cv2
import numpy as np


def compute_slope(pt1, pt2):
    # Assumed minimal implementation (the posted version was a stub):
    # slope dy/dx of the line joining two matched keypoint coordinates.
    dx, dy = pt2[0] - pt1[0], pt2[1] - pt1[1]
    return dy / dx if dx != 0 else float('inf')


def extract_first_frame(video_path):
    """
    提取视频的第一帧作为模板图片。

    参数:
        video_path (str): 视频文件路径。

    返回:
        第一帧的灰度图像,如果无法提取则返回 None。
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: 无法打开模板视频文件 {video_path}")
        return None

    ret, frame = cap.read()
    if not ret:
        print(f"Error: 无法读取模板视频的第一帧 {video_path}")
        cap.release()
        return None

    cap.release()
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray_frame


def compare_features(kp1, des1, kp2, des2, bf, ratio_thresh, min_matches, slope_threshold):
    """
    对比两组特征,计算平均斜率并判断是否检测到目标图片。

    返回:
        average_slope (float): 平均斜率
        is_detected (bool): 是否检测到目标图片
        good_matches_count (int): 通过比值测试的匹配点数
    """
    # Match with knnMatch
    knn_matches = bf.knnMatch(des1, des2, k=2)

    # Apply Lowe's ratio test
    good_matches = []
    for m_n in knn_matches:
        if len(m_n) != 2:
            continue
        m, n = m_n
        if m.distance < ratio_thresh * n.distance:
            good_matches.append(m)

    # If enough matches pass the ratio test, compute the average slope
    if len(good_matches) >= min_matches:
        slopes = []
        for match in good_matches:
            img1_idx = match.queryIdx
            img2_idx = match.trainIdx

            # Keypoint coordinates
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt

            # Slope of the line joining the matched points
            slope = compute_slope((x1, y1), (x2, y2))
            slopes.append(slope)

        # Average slope
        if slopes:
            average_slope = np.mean(slopes)
            is_detected = abs(average_slope) < slope_threshold
            return average_slope, is_detected, len(good_matches)

    return None, False, len(good_matches)


def main():
    """
    对视频的每一帧与指定图片进行 ORB 特征匹配,计算匹配点连线的平均斜率,
    并根据平均斜率确定图片在视频中的位置。
    """
    # === Local parameters ===
    template_video_path = '/Users/mac/Movies/Mac Video Library/Something.The.Lord.Made.2004.1080p.WEBRip.x265-RARBG.mov'  # video to search (A.mp4 in the question)
    target_video_path = '/Volumes/Elements//“神迹”.mp4.子剪辑 041.mp4'  # sub-clip whose first frame is the template image (B.png in the question)

    nfeatures = 2000  # number of ORB feature points
    ratio_thresh = 0.65  # Lowe's ratio-test threshold
    min_matches = 50  # minimum number of matches to count as a detection
    slope_threshold = 0.1  # average-slope threshold; tune as needed

    # === Extract and preprocess the first frame of the target clip ===
    img1_gray = extract_first_frame(target_video_path)
    if img1_gray is None:
        return
    cv2.imwrite("frame_target_041.png", img1_gray)

    # === Initialize the ORB detector ===
    orb = cv2.ORB_create(nfeatures=nfeatures)

    # Detect keypoints and compute descriptors for the template image
    kp1, des1 = orb.detectAndCompute(img1_gray, None)
    if des1 is None:
        print("Error: 无法检测到目标图片的描述符。")
        return
    print(f"目标图片检测到 {len(kp1)} 个关键点。")

    # === Initialize the BFMatcher ===
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)

    # === Open the video file ===
    cap = cv2.VideoCapture(template_video_path)
    if not cap.isOpened():
        print(f"Error: 无法打开视频文件 {template_video_path}")
        return

    frame_index = 0
    detected_frames = []

    while True:
        success, frame = cap.read()
        if not success:
            break  # end of video

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect keypoints and compute descriptors for the current frame
        kp2, des2 = orb.detectAndCompute(frame_gray, None)
        if des2 is None:
            frame_index += 1
            continue  # no descriptors detected in the current frame

        # Compare features and collect the result
        average_slope, is_detected, good_matches_count = compare_features(
            kp1, des1, kp2, des2, bf, ratio_thresh, min_matches, slope_threshold
        )

        if average_slope is not None:
            print(f"帧 {frame_index}: 通过比值测试的匹配点数: {good_matches_count} 平均斜率 = {average_slope:.4f}", end='')
            cv2.imwrite(f"frame_loop_{frame_index}.png", frame_gray)

            # If the average slope is below the threshold, count it as a detection
            if is_detected:
                detected_frames.append((frame_index, average_slope))
                print(f" --- 被标记为检测到目标图片。")
            else:
                print('')

        frame_index += 1

    cap.release()

    # Report the detected frames
    if detected_frames:
        print("\n检测到目标图片的帧数和平均斜率:")
        for idx, avg_slope in detected_frames:
            print(f"帧 {idx}: 平均斜率 = {avg_slope:.4f}")
    else:
        print("\n未在视频中检测到目标图片。")


if __name__ == "__main__":
    main()

And here is the standalone script that matches two saved PNG frames against each other (it imports the loop script above as test2):

import cv2

import test2

# Read the images (adjust the paths to your setup)
img1_path = '/Users/mac/project/python/video_red_check/frame_loop_23329.png'  # first image (frame saved from the loop)
img2_path = '/Users/mac/project/python/video_red_check/frame_target_041.png'  # second image (the template frame)

img1_color = cv2.imread(img1_path)  # read in color (used for display)
img2_color = cv2.imread(img2_path)

img1 = cv2.cvtColor(img1_color, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY)

orb = cv2.ORB_create(nfeatures=2000)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)

kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

average_slope, is_detected, good_matches_count = test2.compare_features(kp1, des1, kp2, des2, bf, 0.65, 50, 0.1)
print(f'{average_slope}, {is_detected}, {good_matches_count}')
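One detail I should point out about the two runs: bf.knnMatch(des1, des2, k=2) is directional, matching each descriptor of its first argument against its second. In the loop the template is the query (des1), while in this standalone script the saved loop frame is the query. A minimal sketch that counts ratio-test survivors in both directions, reusing the paths above:

import cv2

img_frame = cv2.imread('/Users/mac/project/python/video_red_check/frame_loop_23329.png', cv2.IMREAD_GRAYSCALE)
img_template = cv2.imread('/Users/mac/project/python/video_red_check/frame_target_041.png', cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create(nfeatures=2000)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)

_, des_frame = orb.detectAndCompute(img_frame, None)
_, des_template = orb.detectAndCompute(img_template, None)

def count_good(des_query, des_train, ratio=0.65):
    """Count matches that survive Lowe's ratio test for one match direction."""
    good = 0
    for pair in bf.knnMatch(des_query, des_train, k=2):
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good += 1
    return good

# The two directions can yield different numbers of good matches.
print('template -> frame:', count_good(des_template, des_frame))
print('frame -> template:', count_good(des_frame, des_template))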

As soon as the two frames are saved as PNG files and matched individually like this, the issue disappears and they no longer match.
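To rule out the PNG round trip itself, here is a quick pixel-level check (a minimal sketch reusing frame_loop_23329.png). The loop saves the frame as grayscale, and the standalone script re-reads it in color and converts it back to grayscale; PNG is lossless, so the two should be identical:

import cv2
import numpy as np

path = '/Users/mac/project/python/video_red_check/frame_loop_23329.png'

direct = cv2.imread(path, cv2.IMREAD_GRAYSCALE)       # pixels as written by the loop
color = cv2.imread(path)                              # how the standalone script loads it
round_trip = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)  # and converts it back to grayscale

# Expected to print True, since PNG encoding does not alter pixel values.
print('pixels identical:', np.array_equal(direct, round_trip))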

After reading some posts about image similarity, I realized I should add a clarification: B.png is a frame extracted from A.mp4 with its brightness slightly adjusted. I want to determine exactly where B.png appears in A.mp4, i.e., the precise frame index.
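For context on that goal: since the difference is only brightness, one possible cross-check (just a sketch, not my current approach; the video path below is a placeholder) is per-frame normalized cross-correlation via cv2.matchTemplate, which is insensitive to linear brightness changes when the frame and template have the same size:

import cv2

template = cv2.imread('frame_target_041.png', cv2.IMREAD_GRAYSCALE)

cap = cv2.VideoCapture('/path/to/A.mp4')  # placeholder path
best_score, best_index, index = -2.0, -1, 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if gray.shape == template.shape:
        # Same-size images give a 1x1 correlation map; [0, 0] is the score.
        score = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)[0, 0]
        if score > best_score:
            best_score, best_index = score, index
    index += 1
cap.release()
print(f'best frame: {best_index} (score {best_score:.3f})')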