EMGU CV SURF image match
    <p>I have been working with the SURF feature detection example from the EMGU CV library.</p> <p>So far it's working amazingly; I can detect matching objects between 2 given images but I have run into a problem in regards to when the images do not match.</p> <p>I was looking for support from the forums but they are down from where I am. Would anyone know which parameters determine whether an image is a match or not. When I test with 2 images that are not a match, the code still proceeds as if there was a match and draws a blurred thick red line on a random location of the image even when there is not a match. </p> <p>If there is no match I would wish to break from the code and not proceed further.</p> <p>Appendix:</p> <pre><code> static void Run() { Image&lt;Gray, Byte&gt; modelImage = new Image&lt;Gray, byte&gt;("HatersGonnaHate.png"); Image&lt;Gray, Byte&gt; observedImage = new Image&lt;Gray, byte&gt;("box_in_scene.png"); Stopwatch watch; HomographyMatrix homography = null; SURFDetector surfCPU = new SURFDetector(500, false); VectorOfKeyPoint modelKeyPoints; VectorOfKeyPoint observedKeyPoints; Matrix&lt;int&gt; indices; Matrix&lt;float&gt; dist; Matrix&lt;byte&gt; mask; if (GpuInvoke.HasCuda) { GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f); using (GpuImage&lt;Gray, Byte&gt; gpuModelImage = new GpuImage&lt;Gray, byte&gt;(modelImage)) //extract features from the object image using (GpuMat&lt;float&gt; gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null)) using (GpuMat&lt;float&gt; gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints)) using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2)) { modelKeyPoints = new VectorOfKeyPoint(); surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints); watch = Stopwatch.StartNew(); // extract features from the observed image using (GpuImage&lt;Gray, Byte&gt; gpuObservedImage = new GpuImage&lt;Gray, byte&gt;(observedImage)) using (GpuMat&lt;float&gt; gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null)) using (GpuMat&lt;float&gt; gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints)) using (GpuMat&lt;int&gt; gpuMatchIndices = new GpuMat&lt;int&gt;(gpuObservedDescriptors.Size.Height, 2, 1)) using (GpuMat&lt;float&gt; gpuMatchDist = new GpuMat&lt;float&gt;(gpuMatchIndices.Size, 1)) { observedKeyPoints = new VectorOfKeyPoint(); surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints); matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null); indices = new Matrix&lt;int&gt;(gpuMatchIndices.Size); dist = new Matrix&lt;float&gt;(indices.Size); gpuMatchIndices.Download(indices); gpuMatchDist.Download(dist); mask = new Matrix&lt;byte&gt;(dist.Rows, 1); mask.SetValue(255); Features2DTracker.VoteForUniqueness(dist, 0.8, mask); int nonZeroCount = CvInvoke.cvCountNonZero(mask); if (nonZeroCount &gt;= 4) { nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20); if (nonZeroCount &gt;= 4) homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3); } watch.Stop(); } } } else { //extract features from the object image modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null); //MKeyPoint[] kpts = modelKeyPoints.ToArray(); Matrix&lt;float&gt; modelDescriptors = 
surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints); watch = Stopwatch.StartNew(); // extract features from the observed image observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null); Matrix&lt;float&gt; observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints); BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32); matcher.Add(modelDescriptors); int k = 2; indices = new Matrix&lt;int&gt;(observedDescriptors.Rows, k); dist = new Matrix&lt;float&gt;(observedDescriptors.Rows, k); matcher.KnnMatch(observedDescriptors, indices, dist, k, null); mask = new Matrix&lt;byte&gt;(dist.Rows, 1); mask.SetValue(255); Features2DTracker.VoteForUniqueness(dist, 0.8, mask); int nonZeroCount = CvInvoke.cvCountNonZero(mask); if (nonZeroCount &gt;= 4) { nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20); if (nonZeroCount &gt;= 4) homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3); } watch.Stop(); } //Draw the matched keypoints Image&lt;Bgr, Byte&gt; result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS); #region draw the projected region on the image if (homography != null) { //draw a rectangle along the projected model Rectangle rect = modelImage.ROI; PointF[] pts = new PointF[] { new PointF(rect.Left, rect.Bottom), new PointF(rect.Right, rect.Bottom), new PointF(rect.Right, rect.Top), new PointF(rect.Left, rect.Top)}; homography.ProjectPoints(pts); result.DrawPolyline(Array.ConvertAll&lt;PointF, Point&gt;(pts, Point.Round), true, new Bgr(Color.Red), 5); } #endregion ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds)); } } } </code></pre> <p>`</p>
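For what it's worth, in the pipeline above `homography` only ever becomes non-null when at least 4 matches survive both `VoteForUniqueness` and `VoteForSizeAndOrientation`, so its absence is a natural "no match" signal. Below is a minimal sketch of an early exit built only from the variables already in the example; the `minInliers` name and value are hypothetical, not part of the EMGU CV API. It would sit after the if/else block, before the drawing code:

```csharp
// Sketch only: bail out of Run() before any drawing happens when the
// voting stages did not produce a plausible match.
int minInliers = 4; // hypothetical cutoff; tune against your own image pairs

// mask holds 255 for every match that survived the voting stages above,
// so its non-zero count is the number of surviving matches.
if (homography == null || CvInvoke.cvCountNonZero(mask) < minInliers)
{
    Console.WriteLine("No reliable match found; skipping DrawMatches/DrawPolyline.");
    return;
}
```

Tightening the 0.8 uniqueness threshold (lower is stricter) and raising the surviving-match requirement above 4 are the obvious knobs, but the right values depend on the images, so treat both as starting points to verify rather than settled answers.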