How to use cv.perspectiveTransform in opencv.js

function matchOfAKAZE(imgClone, tempClone, out) {
  const inputImage = cv.imread(imgClone);
  const templateImage = cv.imread(tempClone);

  // Create the AKAZE feature detector
  const akaze = new cv.AKAZE();

  // Detect keypoints and compute descriptors
  const keypointsInput = new cv.KeyPointVector();
  akaze.detect(inputImage, keypointsInput);
  const keypointsTemplate = new cv.KeyPointVector();
  akaze.detect(templateImage, keypointsTemplate);

  const descriptorsInput = new cv.Mat();
  akaze.compute(inputImage, keypointsInput, descriptorsInput);

  const descriptorsTemplate = new cv.Mat();
  akaze.compute(templateImage, keypointsTemplate, descriptorsTemplate);

  // Create a brute-force matcher
  // (AKAZE's default descriptors are binary, so NORM_HAMMING would normally be the matching norm)
  const bf = new cv.BFMatcher();

  // Match descriptors with kNN matching (k = 2)
  const matches = new cv.DMatchVectorVector();
  bf.knnMatch(descriptorsInput, descriptorsTemplate, matches, 2);

  // Keep only good matches via Lowe's ratio test
  const goodMatches = [];
  for (let i = 0; i < matches.size(); i++) {
    const matchList = matches.get(i);
    const match1 = matchList.get(0);
    const match2 = matchList.get(1);
    if (match1.distance < 0.75 * match2.distance) {
      goodMatches.push(match1);
    }
  }
  // Collect the coordinates of the matched keypoints
  const matchedPointsInput = new cv.PointVector();
  const matchedPointsTemplate = new cv.PointVector();
  for (const match of goodMatches) {
    const { queryIdx, trainIdx } = match;
    matchedPointsInput.push_back(keypointsInput.get(queryIdx).pt);
    matchedPointsTemplate.push_back(keypointsTemplate.get(trainIdx).pt);
  }
  // Create Mat objects for the source and destination points
  const matchedPointsInputMat = new cv.Mat(matchedPointsInput.size(), 2, cv.CV_32F);
  const matchedPointsTemplateMat = new cv.Mat(matchedPointsTemplate.size(), 2, cv.CV_32F);

  // Copy the matched point coordinates into the corresponding Mats
  for (let i = 0; i < matchedPointsInput.size(); i++) {
    const inputPoint = matchedPointsInput.get(i);
    const templatePoint = matchedPointsTemplate.get(i);

    matchedPointsInputMat.data32F[i * 2] = inputPoint.x;
    matchedPointsInputMat.data32F[i * 2 + 1] = inputPoint.y;

    matchedPointsTemplateMat.data32F[i * 2] = templatePoint.x;
    matchedPointsTemplateMat.data32F[i * 2 + 1] = templatePoint.y;
  }
  // Estimate the homography with RANSAC
  const ransacThresh = 1.5;
  const H = cv.findHomography(
    matchedPointsTemplateMat,
    matchedPointsInputMat,
    cv.RANSAC,
    ransacThresh,
  );
  // // Warp the template into the input image (disabled)
  // const outputSize = new cv.Size(inputImage.cols, inputImage.rows);
  // const outputImage = new cv.Mat(outputSize, inputImage.type());

  // cv.warpPerspective(
  //   templateImage,
  //   outputImage,
  //   H,
  //   outputSize,
  //   cv.INTER_LINEAR,
  //   cv.BORDER_CONSTANT,
  //   new cv.Scalar(255, 255, 255, 255),
  // );

  // cv.imshow(out, outputImage);
  // return;
  const H32F = new cv.Mat();
  H.convertTo(H32F, cv.CV_32F);
  // The four corner points of the template (the region to locate)
  const h = templateImage.rows;
  const w = templateImage.cols;
  const pointsArray = [
    [0, 0],
    [0, h - 1],
    [w - 1, h - 1],
    [w - 1, 0],
  ];
  // Build a cv.Mat from the corner points
  const mat = cv.matFromArray(pointsArray[0].length, pointsArray.length, cv.CV_32F, pointsArray);
  const dst = new cv.Mat();
  // Sanity checks added before calling cv.perspectiveTransform

  // Check the dimensions and data types of mat and the homography
  if (mat.rows !== 2 || mat.type() !== cv.CV_32F) {
    console.error('mat has the wrong dimensions or data type');
    return;
  }

  if (H32F.rows !== 3 || H32F.cols !== 3 || H32F.type() !== cv.CV_32F) {
    console.error('H has the wrong dimensions or data type');
    console.log(H.type(), cv.CV_32F);
    return;
  }

  // Log the number of matched points
  console.log('Number of matched points:', matchedPointsInput.size());

  cv.perspectiveTransform(mat, dst, H32F);
  // Draw the matched region (as a closed polygon) on the input image
  const inputImageWithRect = inputImage.clone();
  console.log(inputImageWithRect);
  cv.polylines(inputImageWithRect, [dst], true, new cv.Scalar(0, 255, 0, 255), 2);

  // Show the result
  cv.imshow(out, inputImageWithRect);
}
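
For reference, a minimal way to drive this function, assuming the two images are already loaded into elements whose ids are the placeholders below (substitute your own), would be something like:

// Placeholder element ids -- replace with your own <img>/<canvas> ids.
cv['onRuntimeInitialized'] = () => {
  matchOfAKAZE('inputImg', 'templateImg', 'outputCanvas');
};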

When I run cv.perspectiveTransform(mat, dst, H32F), it throws a numeric error code that I can't make sense of:
[screenshot of the numeric error code]
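
From the OpenCV docs and the opencv.js feature-matching examples, my understanding (treat this as an assumption) is that cv.perspectiveTransform wants the source points as a 2-channel floating-point Mat (CV_32FC2, one point per element), and that cv.matFromArray takes a flat array of numbers rather than an array of [x, y] pairs. Here is a sketch of how I think the corner Mat is supposed to be built, reusing w, h, and the homography H from the function above:

// Sketch only: the four template corners as a 4x1, 2-channel float Mat.
// matFromArray takes the flat list [x0, y0, x1, y1, ...].
const corners = cv.matFromArray(4, 1, cv.CV_32FC2, [
  0, 0,
  0, h - 1,
  w - 1, h - 1,
  w - 1, 0,
]);
const warpedCorners = new cv.Mat();
cv.perspectiveTransform(corners, warpedCorners, H); // H: 3x3 Mat from cv.findHomography
// warpedCorners is also 4x1 CV_32FC2; the mapped points are in warpedCorners.data32F.
corners.delete();

Is that the expected input layout, or should a 2xN / Nx2 single-channel CV_32F Mat (which is what my code builds) also work?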