[opencv.js] guidance on most appropriate use of cv.findContours

I am attempting to crop a coin out of an image (as consistently as possible independent of the image uploaded–realizing that some images just will not work). My test case is here iCollect

Here is my current methodology that is NOT consistent (reference Step 1 below):

cropImage = function(el) {

            let imageOriginal = cv.imread('imageDestination');
            let imageGray = new cv.Mat();
            let imageDestination = new cv.Mat();
            let imageDestination_all_contours = new cv.Mat();
            let imageDestination_masked = new cv.Mat();
            let imageDestination_cropped = new cv.Mat();
            let imageDestination_resized = new cv.Mat();
            let imageMorphed = new cv.Mat();
            let contours = new cv.MatVector();
            let hierarchy = new cv.Mat();
            let Color_red = new cv.Scalar(255, 0, 0, 255);
            let Color_white = new cv.Scalar(255, 255, 255, 255);

            //convert original BGR image to GRAY
            cv.cvtColor(imageOriginal, imageGray, cv.COLOR_BGR2GRAY);

            //Threshold the image https://docs.opencv.org/3.4/d7/dd0/tutorial_js_thresholding.html
            cv.threshold(imageGray, imageDestination, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1];

            // apply morphology Open and Close https://docs.opencv.org/master/d4/d76/tutorial_js_morphological_ops.html
            kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(3,3));

            // MORPH_OPEN = removes noise (erosion then dilation)
            cv.morphologyEx(imageDestination, imageMorphed, cv.MORPH_OPEN, kernel);

            kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(21,21));

            // MORPH_CLOSE - closes small holes inside the foreground objects, or small black points on the object (dilation then erosion)
            cv.morphologyEx(imageMorphed, imageMorphed, cv.MORPH_CLOSE, kernel);

            //Step 1: Find the Contours
            //Good reference: https://docs.opencv.org/master/d0/d43/tutorial_js_table_of_contents_contours.html
            //Good reference: https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html
            //RETR_LIST - Retrieves all the contours, but doesn't create any parent-child relationship.
            //RETR_EXTERNAL - Returns only extreme outer contour
            //RETR_CCOMP - Retrieves all the contours and arranges them to a 2-level hierarchy.
            //RETR_TREE - Retrieves all the contours and creates a full family hierarchy list.
            //CHAIN_APPROX_NONE - Stores all the contour points. That is, any 2 subsequent points (x1,y1) and (x2,y2) of the contour will be either horizontal, vertical or diagonal neighbors
            //CHAIN_APPROX_SIMPLE - Compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points
            cv.findContours(imageMorphed, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);

            //Ignore any contour that looks like it's the entire image
            area_of_75_percent_of_entire_image = (imageOriginal.rows * imageOriginal.cols) - (imageOriginal.rows * imageOriginal.cols) * .25;
            console.log("entire area: ", area_of_75_percent_of_entire_image);

            //find the contour that looks like a coin
            let area_max =0;
            let i_max = 0;
            let cnt_max = 0;
            var ii = 0;
            for (let i = 0; i < contours.size(); i++) {
                let cnt = contours.get(i);
                let area = cv.contourArea(cnt, false);
                console.log("i: ", i, " area: ", area, " area_max: ", area_max, " delta: ", area/1000000);
                if((area >= area_max) && (area < area_of_75_percent_of_entire_image)){
                    console.log("winner: (", i, ") ", area, " >= ", area_max)
                    area_max = area;
                    i_max = i;
                    cnt_max = cnt;
                    ii = i;
                }
            }

            //no max contour found above
            if(ii === 0){
                cnt_max = contours.get(0);
            }

            imageDestination_all_contours = imageOriginal.clone();
            cv.drawContours(imageDestination_all_contours, contours, -1, Color_red, 3);
            cv.imshow('imageDestination_all_contours', imageDestination_all_contours);
            $('#messagetouser_all_contours').html("area_max: " + area_max + " i_max: " + i_max + " ii: " + ii);

            //Step 2: Mask the contour selected
            let rotatedRect = cv.fitEllipse(cnt_max); //a rectangle around the largest contour
            cv.ellipse1(imageOriginal, rotatedRect, Color_white, 1, cv.LINE_8);
            let mask = new cv.Mat.ones(imageOriginal.size(), cv.CV_8UC3);
            cv.ellipse1(mask, rotatedRect, Color_white, -1, cv.LINE_8);
            cv.cvtColor(mask, mask, cv.COLOR_BGR2GRAY);
            cv.bitwise_and(imageOriginal, imageOriginal, imageDestination_masked, mask);
            cv.imshow('imageDestination_masked', mask);
            $('#messagetouser_masked').html("rows: " + mask.rows + " cols: " + mask.cols + " type: " + mask.type() + " depth: " + mask.depth() + " channels: " + mask.channels());

            //Step 3: Crop the contour
            imageDestination_cropped = imageDestination_masked.clone();
            let boundingRect = cv.boundingRect(cnt_max);
            let point1 = new cv.Point(boundingRect.x, boundingRect.y);
            let point2 = new cv.Point(boundingRect.x + boundingRect.width, boundingRect.y + boundingRect.height);
            cv.rectangle(imageDestination_cropped, point1, point2, new cv.Scalar(255, 255, 255, 0), 0, cv.LINE_8, 0);
            cv.imshow('imageDestination_cropped', imageDestination_cropped);
            $('#messagetouser_cropped').html("rows: " + imageDestination_cropped.rows + " cols: " + imageDestination_cropped.cols + " type: " + imageDestination_cropped.type() + " depth: " + imageDestination_cropped.depth() + " channels: " + imageDestination_cropped.channels());

            //Step 4: Resize the contour
            let dsize = new cv.Size(1000, 1000);
            cv.resize(imageDestination_cropped, imageDestination_resized, dsize, 0, 0, cv.INTER_AREA);
            cv.imshow('imageDestination_resized', imageDestination_resized);
            $('#messagetouser_resized').html("rows: " + imageDestination_resized.rows + " cols: " + imageDestination_resized.cols + " type: " + imageDestination_resized.type() + " depth: " + imageDestination_resized.depth() + " channels: " + imageDestination_resized.channels());

            imageOriginal.delete();
            imageDestination.delete();
            imageDestination_all_contours.delete();
            imageDestination_masked.delete();
            imageDestination_cropped.delete();
            imageDestination_resized.delete();
            imageMorphed.delete();
            contours.delete();
            hierarchy.delete();

        };

You can easily see how this fails by pressing the test5 button and then the crop button in the link above. Here is the result.

Please let me know if you have any guidance on the best way to approach consistently finding the right contour to crop.

thank you!

fitEllipse doesn’t find the minimal enclosing ellipse/circle. it will actually try to fit an ellipse through the contour points you give. if your contour is that noisy, it’ll do what you see.

your segmentation needs improvement. I’d say different threshold, vary the morphology operations (no opening, perhaps just a dilation).

1 Like

Thanks @crackwitz Yep, if you remove the open test 5 works but then others fail

I’ve been able to find a method (either changing the threshold/morph or how I pic the max contour) that makes each picture work independently but I’ve not been able to find what approach will work for the most type of images.

Maybe the answer is to have several different strategies for different types of images and then focus on building something that recognizes what type of image the user uploaded (a different problem that i’ve not yet thought about).

I was hoping that there was just a better way to approach the problem out there…

hmmmm I guess I can’t assume that coins are always round/elliptical so that excludes a bunch of “circle finding” algorithms.

if you always had pictures like of this coin, I’d say the situation is rather easy because it’s so nicely contrasting (background dark, coin bright/reflecting).

to enhance the picture, you could demand the coin be placed on a specifically matte black/dark background, inside a (white) softbox. the softbox light will ensure the coin looks bright (light reflects off the coin).

erosion/dilation/etc could reliably remove small dark spots inside the coin (which seem to have given you trouble here), while keeping the dark background intact.

I guess I’m just reiterating for myself. I don’t think I’ve said anything novel just now.

if you have problematic pictures, do share them. I agree that multiple approaches might be required. or you reward/punish the users until they comply with what’s most convenient to you.

sure, here are 8 I’ve been using in the test use case on iCollect (they align with button 1 thru button 8). These are typical pictures users upload.

test1.png (413×402) (icollect.money)
test2.jpg (1920×1080) (icollect.money)
test3.jpg (76×100) (icollect.money)
test4.jpg (1200×1182) (icollect.money)
test5.png (500×504) (icollect.money)
test6.png (473×656) (icollect.money)
test7.jpg (1920×1080) (icollect.money)
test8.jpg (3264×2448) (icollect.money)

hmmm you could try throwing “saliency” algorithms at those pictures. they basically highlight where interesting/non-background/contrasting stuff is in a picture. that should take care of deciding where a coin is and then also if it’s lighter or darker than the rest of the picture (which way to threshold).

since all of those are quite circular, perhaps also throw a Hough transform for circles at these pictures. it’s gonna have trouble seeing ellipses but if you need that there are extensions and related methods (fast radial symmetry transform, etc)

1 Like

It may be easier if you can find the convex hull after you get the contour and use that to build the mask.

2 Likes

@sanjiv Thank you. It is definitly an improvement in the accuracy. Now I just have to figure out how to find the correct contour to pull out to use. Here is the delta:

I put the hosted script on iCollect if you interested in seeing the delta on a few images.

Here is how I changed the code to test:

cropImage = function(el) {

    let imageOriginal = cv.imread('imageDestination');
    let imageGray = new cv.Mat();
    let imageDestination = new cv.Mat();
    let imageDestination_all_contours = new cv.Mat();
    let imageDestination_masked = new cv.Mat();
    let imageDestination_cropped = new cv.Mat();
    let imageDestination_resized = new cv.Mat();
    let imageDestination_convex_hull = new cv.Mat();
    let imageMorphed = new cv.Mat();
    let contours = new cv.MatVector();
    let hierarchy = new cv.Mat();
    let Color_red = new cv.Scalar(255, 0, 0, 255);
    let Color_white = new cv.Scalar(255, 255, 255, 255);

    //convert original BGR image to GRAY
    cv.cvtColor(imageOriginal, imageGray, cv.COLOR_BGR2GRAY);

    //Threshold the image https://docs.opencv.org/3.4/d7/dd0/tutorial_js_thresholding.html
    cv.threshold(imageGray, imageDestination, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1];

    // apply morphology Open and Close https://docs.opencv.org/master/d4/d76/tutorial_js_morphological_ops.html
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(3,3));

    // MORPH_OPEN = removes noise (erosion then dilation)
    cv.morphologyEx(imageDestination, imageMorphed, cv.MORPH_OPEN, kernel);

    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(21,21));

    // MORPH_CLOSE - closes small holes inside the foreground objects, or small black points on the object (dilation then erosion)
    cv.morphologyEx(imageMorphed, imageMorphed, cv.MORPH_CLOSE, kernel);

    //Step 1: Find the Contours
    //Good reference: https://docs.opencv.org/master/d0/d43/tutorial_js_table_of_contents_contours.html
    //Good reference: https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html
    //RETR_LIST - Retrieves all the contours, but doesn't create any parent-child relationship.
    //RETR_EXTERNAL - Returns only extreme outer contour
    //RETR_CCOMP - Retrieves all the contours and arranges them to a 2-level hierarchy.
    //RETR_TREE - Retrieves all the contours and creates a full family hierarchy list.
    //CHAIN_APPROX_NONE - Stores all the contour points. That is, any 2 subsequent points (x1,y1) and (x2,y2) of the contour will be either horizontal, vertical or diagonal neighbors
    //CHAIN_APPROX_SIMPLE - Compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points
    cv.findContours(imageMorphed, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);

    //Ignore any contour that looks like it's the entire image
    area_of_75_percent_of_entire_image = (imageOriginal.rows * imageOriginal.cols) - (imageOriginal.rows * imageOriginal.cols) * .25;
    console.log("entire area: ", area_of_75_percent_of_entire_image);

    //find the contour that looks like a coin
    let area_max =0;
    let i_max = 0;
    let cnt_max = 0;
    var ii = 0;
    for (let i = 0; i < contours.size(); i++) {
        let cnt = contours.get(i);
        let area = cv.contourArea(cnt, false);
        console.log("i: ", i, " area: ", area, " area_max: ", area_max, " delta: ", area/1000000);
        if((area >= area_max) && (area < area_of_75_percent_of_entire_image)){
            console.log("winner: (", i, ") ", area, " >= ", area_max)
            area_max = area;
            i_max = i;
            cnt_max = cnt;
            ii = i;
        }
    }

    //no max contour found above
    if(ii === 0){
        cnt_max = contours.get(0);
    }

    imageDestination_all_contours = imageOriginal.clone();
    cv.drawContours(imageDestination_all_contours, contours, -1, Color_red, 3);
    cv.imshow('imageDestination_all_contours', imageDestination_all_contours);
    $('#messagetouser_all_contours').html("area_max: " + area_max + " i_max: " + i_max + " ii: " + ii);

    //test convex hull. see reference: https://docs.opencv.org/3.4/dc/dcf/tutorial_js_contour_features.html
    let hull_max = 0;
    let hull = new cv.MatVector();
    imageDestination_convex_hull = imageOriginal.clone();
    // approximates each contour to convex hull
    for (let i = 0; i < contours.size(); ++i) {
        let tmp = new cv.Mat();
        let cnt = contours.get(i);

        let area = cv.contourArea(cnt, false);
        console.log("console_hull i: ", i, " area: ", area, " area_max: ", area_max, " delta: ", area/1000000);
        if((area >= area_max) && (area < area_of_75_percent_of_entire_image)){
            console.log("console_hull winner: (", i, ") ", area, " >= ", area_max)
            area_max = area;
            i_max = i;
            cnt_max = cnt;
            ii = i;
            cv.convexHull(cnt, tmp, false, true);
            hull_max = tmp;
            hull.push_back(tmp);
        }
    }

    // draw contours with random Scalar
    for (let i = 0; i < hull.size(); ++i) {
        cv.drawContours(imageDestination_convex_hull, hull, i, Color_red, 3, 8, hierarchy, 0);
    }
    cv.imshow('imageDestination_convex_hull', imageDestination_convex_hull);
    $('#messagetouser_convex_hull').html("rows: " + imageDestination_convex_hull.rows + " cols: " + imageDestination_convex_hull.cols + " type: " + imageDestination_convex_hull.type() + " depth: " + imageDestination_convex_hull.depth() + " channels: " + imageDestination_convex_hull.channels());
    //end test

    //Step 2: Mask the contour selected
    //let rotatedRect = cv.fitEllipse(cnt_max); //a rectangle around the largest contour
    let rotatedRect = cv.fitEllipse(hull_max); //a rectangle around the largest contour
    cv.ellipse1(imageOriginal, rotatedRect, Color_white, 1, cv.LINE_8);
    let mask = new cv.Mat.ones(imageOriginal.size(), cv.CV_8UC3);
    cv.ellipse1(mask, rotatedRect, Color_white, -1, cv.LINE_8);
    cv.cvtColor(mask, mask, cv.COLOR_BGR2GRAY);
    cv.bitwise_and(imageOriginal, imageOriginal, imageDestination_masked, mask);
    cv.imshow('imageDestination_masked', mask);
    $('#messagetouser_masked').html("rows: " + mask.rows + " cols: " + mask.cols + " type: " + mask.type() + " depth: " + mask.depth() + " channels: " + mask.channels());

    //Step 3: Crop the contour
    imageDestination_cropped = imageDestination_masked.clone();
    let boundingRect = cv.boundingRect(cnt_max);
    let point1 = new cv.Point(boundingRect.x, boundingRect.y);
    let point2 = new cv.Point(boundingRect.x + boundingRect.width, boundingRect.y + boundingRect.height);
    cv.rectangle(imageDestination_cropped, point1, point2, new cv.Scalar(255, 255, 255, 0), 0, cv.LINE_8, 0);
    cv.imshow('imageDestination_cropped', imageDestination_cropped);
    $('#messagetouser_cropped').html("rows: " + imageDestination_cropped.rows + " cols: " + imageDestination_cropped.cols + " type: " + imageDestination_cropped.type() + " depth: " + imageDestination_cropped.depth() + " channels: " + imageDestination_cropped.channels());

    //Step 4: Resize the contour
    let dsize = new cv.Size(1000, 1000);
    cv.resize(imageDestination_cropped, imageDestination_resized, dsize, 0, 0, cv.INTER_AREA);
    cv.imshow('imageDestination_resized', imageDestination_resized);
    $('#messagetouser_resized').html("rows: " + imageDestination_resized.rows + " cols: " + imageDestination_resized.cols + " type: " + imageDestination_resized.type() + " depth: " + imageDestination_resized.depth() + " channels: " + imageDestination_resized.channels());

    imageOriginal.delete();
    imageDestination.delete();
    imageDestination_all_contours.delete();
    imageDestination_masked.delete();
    imageDestination_cropped.delete();
    imageDestination_resized.delete();
    imageDestination_convex_hull.delete();
    imageMorphed.delete();
    contours.delete();
    hierarchy.delete();
    cnt.delete();
    tmp.delete();

};

@crackwitz I’ve been testing your idea about saliency against a few other simpler approaches. Here is what I’ve found.

Simpler approaches first (see options 1 - 4 commented out in script below) - they all produce similar results from a findContour perspective. Some a do a better job on one image but not another. It’s a mixed bag. Just a simple threshold is probably the most accurate for most images.

I attempted saliency using this article as a reference. I could not do exactly what the author mentions because pyrMeanShiftFiltering is not available in opencv.js like so many other necessary functions. However, what I did find is that doing the Back-project of the Hue and Saturation histogram of the entire image, on the image itself provided an increase in accuracy in finding a good base to use prior to finding contours. However, the second step the author recommends “Process the back-projection to get a saliency map” reduced the accuracy greatly – you can see where I commented it out but it will show the approach.

For now, I’m going to proceed with this approach as it works with 80% of the images unless you can see any additional ways to continue researching this to gain better accuracy.

again, thank you for all your help. I uploaded the code below so you can see the test at iCollect

The entire function can be seen here:

cropImage = function(el) {

            let imageOriginal = cv.imread('imageDestination');
            let imageGray = new cv.Mat();
            let imageDestination = new cv.Mat();
            let imageDestination_all_contours = new cv.Mat();
            let imageDestination_mask = new cv.Mat();
            let imageDestination_masked = new cv.Mat();
            let imageDestination_cropped = new cv.Mat();
            let imageDestination_resized = new cv.Mat();
            let imageDestination_convex_hull = new cv.Mat();
            let imageDestination_rectangle = new cv.Mat();
            let imageMorphed = new cv.Mat();
            let contours = new cv.MatVector();
            let hierarchy = new cv.Mat();
            let hull = new cv.MatVector();
            let tmp = new cv.Mat();
            let Color_red = new cv.Scalar(255, 0, 0, 255);
            let Color_white = new cv.Scalar(255, 255, 255, 255);
            let area_max =0;
            var ii = 0;
            let hull_max = 0;
            //initialize saliency algorithm variables
            let hsv = new cv.Mat();
            let hsvt = new cv.Mat();
            let mask2 = new cv.Mat();
            let hist = new cv.Mat();
            let srcVec = new cv.MatVector();
            let dstVec = new cv.MatVector();
            let channels = [0, 1];
            let histSize = [2,2];
            let ranges = [0, 180, 0, 256];


/* Prepare Image for to find contours ********************************************************************/

            //convert original BGR image to GRAY
            cv.cvtColor(imageOriginal, imageGray, cv.COLOR_BGR2GRAY);

/*          //option 1: Attempt a Canny algorithm to outline the features that highlight interesting parts of the image that will become contours
            cv.Canny(imageGray, imageMorphed, 50, 500);
            cv.imshow('imageDestination_threshold', imageMorphed);
*/

/*          //option 2: Attempt a simple OPEN/CLOSE morph technique to outline the features that highlight interesting parts of the image that will become contours
            //Threshold the image https://docs.opencv.org/3.4/d7/dd0/tutorial_js_thresholding.html
            cv.threshold(imageGray, imageDestination, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1];
            cv.imshow('imageDestination_threshold', imageDestination);

            // apply morphology Open and Close https://docs.opencv.org/master/d4/d76/tutorial_js_morphological_ops.html
            kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(3,3));
            // MORPH_OPEN = removes noise (erosion then dilation)
            cv.morphologyEx(imageDestination, imageMorphed, cv.MORPH_OPEN, kernel);
            cv.imshow('imageDestination_morphed1', imageMorphed);
            kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(21,21));
            // MORPH_CLOSE - closes small holes inside the foreground objects, or small black points on the object (dilation then erosion)
            cv.morphologyEx(imageMorphed, imageMorphed, cv.MORPH_CLOSE, kernel);
            cv.imshow('imageDestination_morphed2', imageMorphed);
*/

/*          //option 3: Attempt a simple threshold to outline the features that highlight interesting parts of the image that will become contours
            cv.threshold(imageGray, imageMorphed, 177, 200, cv.THRESH_BINARY);
            cv.imshow('imageDestination_threshold', imageMorphed);
*/

/*          //option 4: Attempt a drawing algorithm to outline the features that highlight interesting parts of the image that will become contours
            const kernel = cv.getStructuringElement(cv.MORPH_RECT,new cv.Size(5,5));
            cv.dilate(imageGray, imageMorphed, kernel, new cv.Point(-1, 1), 1);
            cv.absdiff(imageMorphed, imageGray, imageMorphed);
            cv.bitwise_not(imageMorphed, imageMorphed);
            cv.threshold(imageMorphed, imageMorphed, 177, 200, cv.THRESH_BINARY);
            cv.imshow('imageDestination_threshold', imageMorphed);
*/

            //option 5: Attempt to use saliency algorithms
            //Reference: https://jacobgil.github.io/computervision/saliency-from-backproj
            //Back-project the Hue, Saturation histogram of the entire image, on the image itself.
            //The back-projection for each channel pixel is just the intensity, divided by how many pixels have that intensity.
            //It’s an attempt to assign each pixel a probability of belonging to the background.
            //We use only 2 bins for each channel. That means we quantize the image into 4 colors (since we have 2 channels).
            //The more levels we use, the more details we get in the saliency map.
            srcVec.push_back(imageOriginal);
            dstVec.push_back(imageOriginal);
            cv.cvtColor(imageOriginal, hsv, cv.COLOR_BGR2HSV);
            cv.cvtColor(imageOriginal, hsvt, cv.COLOR_BGR2HSV);
            // calculating object histogram
            cv.calcHist(srcVec, channels, mask2, hist, histSize, ranges, 1);
            // normalize histogram and apply backprojection
            cv.normalize(hist,hist,0,255,cv.NORM_MINMAX);
            cv.calcBackProject(dstVec,channels,hist,imageMorphed , ranges, 1);
            cv.imshow('imageDestination_threshold', imageMorphed);

            //Problem: The below actually produces less accuracy!
            //Process the back-projection to get a saliency map. Enhance the contrast of the saliency map with histogram equalization,
            //and invert the image. The goal is to produce a smooth saliency map where salient regions have bright pixels.
/*          cv.normalize(imageMorphed,imageMorphed,0,255,cv.NORM_MINMAX);
            let salienciesVec = new cv.MatVector();
            salienciesVec.push_back(imageMorphed);
            salienciesVec.push_back(imageMorphed);
            salienciesVec.push_back(imageMorphed);
            let saliency = new cv.Mat();
            cv.merge(salienciesVec, saliency);
            cv.cvtColor(saliency, saliency, cv.COLOR_BGR2GRAY);
            cv.equalizeHist(saliency, saliency);
            cv.threshold(saliency, imageMorphed, 177, 200, cv.THRESH_BINARY);
            cv.imshow('imageDestination_threshold', imageMorphed);
*/

/* Find the contours ********************************************************************/

            //Good reference: https://docs.opencv.org/master/d0/d43/tutorial_js_table_of_contents_contours.html
            //Good reference: https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html
            //RETR_LIST - Retrieves all the contours, but doesn't create any parent-child relationship.
            //RETR_EXTERNAL - Returns only extreme outer contour
            //RETR_CCOMP - Retrieves all the contours and arranges them to a 2-level hierarchy.
            //RETR_TREE - Retrieves all the contours and creates a full family hierarchy list.
            //CHAIN_APPROX_NONE - Stores all the contour points. That is, any 2 subsequent points (x1,y1) and (x2,y2) of the contour will be either horizontal, vertical or diagonal neighbors
            //CHAIN_APPROX_SIMPLE - Compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points
            cv.findContours(imageMorphed, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);

            imageDestination_all_contours = imageOriginal.clone();
            cv.drawContours(imageDestination_all_contours, contours, -1, Color_red, 3);
            cv.imshow('imageDestination_all_contours', imageDestination_all_contours);
            $('#messagetouser_all_contours').html("Found: " + contours.size() + " contours in image");

/* Review each contour and find the one we think is the coin and get it's convex hull *********************************/

            //Ignore any contour that looks like it's the entire image
            area_of_75_percent_of_entire_image = (imageOriginal.rows * imageOriginal.cols) - (imageOriginal.rows * imageOriginal.cols) * .25;
            console.log("Image area: ", imageOriginal.rows * imageOriginal.cols, " 75% threshold: ", area_of_75_percent_of_entire_image);

            imageDestination_convex_hull = imageOriginal.clone();

            for (let i = 0; i < contours.size(); ++i) {
                let cnt = contours.get(i);
                let area = cv.contourArea(cnt, false);
                //console.log("console_hull i: ", i, " area: ", area, " area_max: ", area_max, " delta: ");
                if((area >= area_max) && (area < area_of_75_percent_of_entire_image)){
                    //console.log("larger area found: (", i, ") ", area, " >= ", area_max)
                    area_max = area;
                    ii = i;
                    //convex hull see reference: https://docs.opencv.org/3.4/dc/dcf/tutorial_js_contour_features.html
                    cv.convexHull(cnt, tmp, false, true);
                    hull_max = tmp;
                }
            }

            hull.push_back(hull_max);

            //no max contour found above
            if(ii === 0){
                console.log("found only 1 contour, using it...")
                hull_max = contours.get(0);
            }

            // draw contours
            for (let i = 0; i < hull.size(); ++i) {
                cv.drawContours(imageDestination_convex_hull, hull, i, Color_red, 3, 8, hierarchy, 0);
            }

            cv.imshow('imageDestination_convex_hull', imageDestination_convex_hull);
            $('#messagetouser_convex_hull').html("reviewed this many contours: " + ii + " in an Image with area: " + imageOriginal.rows * imageOriginal.cols +
                " We only looked at contours that were less than: " + area_of_75_percent_of_entire_image + " or 75% of the image area ");

/* Mask the part of the image we think is the coin *********************************/

            //fit an ellipse around the largest contour
            let rotatedRect = cv.fitEllipse(hull_max);

            //create the mask using the ellipse that is around the contour
            let mask = new cv.Mat.ones(imageOriginal.size(), cv.CV_8UC3);
            cv.ellipse1(mask, rotatedRect, Color_white, -1, cv.LINE_8);
            cv.cvtColor(mask, mask, cv.COLOR_BGR2GRAY);
            cv.imshow('imageDestination_mask', mask);
            $('#messagetouser_mask').html("rows: " + mask.rows + " cols: " + mask.cols + " type: " + mask.type() + " depth: " + mask.depth() + " channels: " + mask.channels());

            //overlay the mask on the image
            cv.bitwise_and(imageOriginal, imageOriginal, imageDestination_masked, mask);
            cv.imshow('imageDestination_masked', imageDestination_masked);
            $('#messagetouser_masked').html("rows: " + imageDestination_masked.rows + " cols: " + imageDestination_masked.cols + " type: " + imageDestination_masked.type() + " depth: " + imageDestination_masked.depth() + " channels: " + imageDestination_masked.channels());

/* Draw a rectangle around the selected ellipse from above *********************************/

            imageDestination_rectangle = imageDestination_masked.clone();
            let x1 = rotatedRect.center.x - (rotatedRect.size.width / 2 ) * 1.03;//extending it 3%, note: this can cause API to fail if number goes negative
            let y1 = rotatedRect.center.y - (rotatedRect.size.height / 2 ) * 1.03;//extending it 3%
            let x2 = rotatedRect.center.x + (rotatedRect.size.width / 2 ) * 1.03;//extending it 3%
            let y2 = rotatedRect.center.y + (rotatedRect.size.height / 2 ) * 1.03;//extending it 3%
            let xpoint1 = new cv.Point(x1, y2);
            let xpoint2 = new cv.Point(x2, y1);
            cv.rectangle(imageDestination_rectangle, xpoint1, xpoint2, Color_red, 3, cv.LINE_8, 0);
            cv.imshow('imageDestination_rectangle', imageDestination_rectangle);
            $('#messagetouser_rectangle').html("rect around image = point1: " + x1 + " : " + y1 + " point2: " + x2  + " : " + y2);

/* Crop the contour out of the image  *********************************/

            imageDestination_cropped = imageDestination_masked.clone();
            let rect = new cv.Rect(x1, y1, x2-x1, y2-y1);
            imageDestination_cropped = imageDestination_cropped.roi(rect); //perform the crop
            cv.imshow('imageDestination_cropped', imageDestination_cropped);
            $('#messagetouser_cropped').html("rect to crop: x1: " + x1 + " y1: " + y1 + " x2: " + (x2-x1) + " y2: " + (y2-y1));

            //Step 4: Resize the contour
            let dsize = new cv.Size(1000, 1000);
            cv.resize(imageDestination_cropped, imageDestination_resized, dsize, 0, 0, cv.INTER_AREA);
            cv.imshow('imageDestination_resized', imageDestination_resized);
            $('#messagetouser_resized').html("rows: " + imageDestination_resized.rows + " cols: " + imageDestination_resized.cols + " type: " + imageDestination_resized.type() + " depth: " + imageDestination_resized.depth() + " channels: " + imageDestination_resized.channels());

            imageOriginal.delete();
            imageDestination.delete();
            imageDestination_all_contours.delete();
            imageDestination_mask.delete();
            imageDestination_masked.delete();
            imageDestination_cropped.delete();
            imageDestination_resized.delete();
            imageDestination_convex_hull.delete();
            imageDestination_rectangle.delete();
            imageMorphed.delete();
            contours.delete();
            hierarchy.delete();
            tmp.delete();
            hsv.delete();
            hsvt.delete();
            mask2.delete();
            hist.delete();
        };

the flavor or saliency you found there looks somewhat “low level”, but if it works, great!

here’s some more that might not be available. this is the stuff I remembered. it’s a bit more fuzzy.

https://docs.opencv.org/master/d8/d65/group__saliency.html

as I understand it, opencv.js has a “whitelist” of modules that translate correctly, as verified by humans. if you think it’s worth the bother, you could investigate and see if any as yet unavailable modules translate correctly as they are, or with any changes you think are necessary.

1 Like

Yes, I went there first but none of the apis are available here. But I found that the article I used with available APIs produced “almost” the same results.

as I get better at opencv, i’ll try to translate a few myself (but i’m a C++ novice).

thanks again.