Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
vpKeyPoint.h
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Key point functionalities.
32 */
33#ifndef VP_KEYPOINT_H
34#define VP_KEYPOINT_H
35
36#include <visp3/core/vpConfig.h>
37
38#if defined(VISP_HAVE_OPENCV) && \
39 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
40 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))
41
42#include <algorithm> // std::transform
43#include <float.h> // DBL_MAX
44#include <fstream> // std::ofstream
45#include <limits>
46#include <map> // std::map
47#include <numeric> // std::accumulate
48#include <stdlib.h> // srand, rand
49#include <time.h> // time
50#include <vector> // std::vector
51
52#include <visp3/core/vpDisplay.h>
53#include <visp3/core/vpImageConvert.h>
54#include <visp3/core/vpPixelMeterConversion.h>
55#include <visp3/core/vpPlane.h>
56#include <visp3/core/vpPoint.h>
57#include <visp3/vision/vpBasicKeyPoint.h>
58#include <visp3/vision/vpPose.h>
59#ifdef VISP_HAVE_MODULE_IO
60#include <visp3/io/vpImageIo.h>
61#endif
62#include <visp3/core/vpConvert.h>
63#include <visp3/core/vpCylinder.h>
64#include <visp3/core/vpMeterPixelConversion.h>
65#include <visp3/core/vpPolygon.h>
66#include <visp3/vision/vpXmlConfigParserKeyPoint.h>
67
68#include <opencv2/core/core.hpp>
69
70#if defined(HAVE_OPENCV_FEATURES2D)
71#include <opencv2/features2d/features2d.hpp>
72#endif
73
74#if defined(HAVE_OPENCV_XFEATURES2D)
75#include <opencv2/xfeatures2d.hpp>
76#endif
77
78#if defined(HAVE_OPENCV_IMGPROC)
79#include <opencv2/imgproc/imgproc.hpp>
80#endif
81
82#if defined(HAVE_OPENCV_NONFREE)
83#include <opencv2/nonfree/nonfree.hpp>
84#endif
85
273class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
274{
275public:
290
300
302 typedef enum
303 {
308 } vpImageFormatType;
309
312 {
313#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
314# if defined(HAVE_OPENCV_FEATURES)
319 DETECTOR_SIFT,
321# endif
322# if defined(HAVE_OPENCV_XFEATURES2D)
327 DETECTOR_MSD,
328 DETECTOR_STAR,
329# endif
330# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
331 DETECTOR_SURF,
332# endif
333#else // OpenCV < 5.0.0
334# if defined(HAVE_OPENCV_FEATURES2D)
341# if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
345# endif
346# endif
347# if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
348 DETECTOR_MSD,
349# endif
350# if ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)
351 DETECTOR_SIFT,
352# endif
353#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
354 DETECTOR_STAR,
355# endif
356# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
357 DETECTOR_SURF,
358# endif
359#endif
360
362 };
363
366 {
367#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
368# if defined(HAVE_OPENCV_FEATURES)
370 DESCRIPTOR_SIFT,
371# endif
372# if defined(HAVE_OPENCV_XFEATURES2D)
375 DESCRIPTOR_BoostDesc,
381 DESCRIPTOR_VGG,
382# endif
383# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
384 DESCRIPTOR_SURF,
385# endif
386#else // opencv < 5.0.0
387# if defined(HAVE_OPENCV_FEATURES2D)
390# if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
393# endif
394# endif
395# if defined(HAVE_OPENCV_XFEATURES2D)
400# endif
401# if ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)
402 DESCRIPTOR_SIFT,
403# endif
404# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
405 DESCRIPTOR_SURF,
406# endif
407#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
408 DESCRIPTOR_BoostDesc,
409 DESCRIPTOR_VGG,
410# endif
411#endif
412
414 };
415
425 vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
426 const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
427
437 vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
438 const std::string &matcherName = "BruteForce-Hamming",
439 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
440
450 vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
451 const std::string &matcherName = "BruteForce",
452 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
453
460 unsigned int buildReference(const vpImage<unsigned char> &I) VP_OVERRIDE;
461
471 unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
472 unsigned int width) VP_OVERRIDE;
473
481 unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle) VP_OVERRIDE;
482
494 unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
495 std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
496
510 unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
511 const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
512 bool append = false, int class_id = -1);
513
520 unsigned int buildReference(const vpImage<vpRGBa> &I_color);
521
531 unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
532 unsigned int width);
533
541 unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
542
554 unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
555 std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
556
569 unsigned int buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
570 const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
571 bool append = false, int class_id = -1);
572
588 static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
589 const vpHomogeneousMatrix &cMo, cv::Point3f &point);
590
606 static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
607 const vpHomogeneousMatrix &cMo, vpPoint &point);
608
625 static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
626 std::vector<cv::KeyPoint> &candidates,
627 const std::vector<vpPolygon> &polygons,
628 const std::vector<std::vector<vpPoint> > &roisPt,
629 std::vector<cv::Point3f> &points, cv::Mat *descriptors = nullptr);
630
647 static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
648 std::vector<vpImagePoint> &candidates,
649 const std::vector<vpPolygon> &polygons,
650 const std::vector<std::vector<vpPoint> > &roisPt,
651 std::vector<vpPoint> &points, cv::Mat *descriptors = nullptr);
652
668 static void
669 compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
670 std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
671 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
672 std::vector<cv::Point3f> &points, cv::Mat *descriptors = nullptr);
673
689 static void
690 compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
691 std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
692 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
693 std::vector<vpPoint> &points, cv::Mat *descriptors = nullptr);
694
708 bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
709 const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
710 double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr);
711
724 bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
725 double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr);
726
740 bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
741 std::vector<unsigned int> &inlierIndex, double &elapsedTime,
742 bool (*func)(const vpHomogeneousMatrix &) = nullptr);
743
752 void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
753 vpImage<unsigned char> &IMatching);
754
764 void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
765
774 void createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
775
785 void createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
786
794 void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
795 const vpRect &rectangle = vpRect());
796
804 void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle = vpRect());
805
813 void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
814
823 void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
824 const vpRect &rectangle = vpRect());
825
834 void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
835 const vpRect &rectangle = vpRect());
836
845 void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
846 const cv::Mat &mask = cv::Mat());
847
862 void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
863 std::vector<cv::Mat> &listOfDescriptors,
864 std::vector<vpImage<unsigned char> > *listOfAffineI = nullptr);
865
873 void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3) VP_OVERRIDE;
874
882 void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green) VP_OVERRIDE;
883
891 void display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size = 3);
892
900 void display(const vpImage<vpRGBa> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
901
913 void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
914 unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
915
927 void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
928 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
929 unsigned int crossSize = 3, unsigned int lineThickness = 1);
930
942 void displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
943 unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
944
956 void displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
957 unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
958
970 void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
971 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
972 unsigned int crossSize = 3, unsigned int lineThickness = 1);
973
984 void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
985 std::vector<cv::Point3f> *trainPoints = nullptr);
986
997 void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
998 std::vector<cv::Point3f> *trainPoints = nullptr);
999
1010 void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1011 std::vector<cv::Point3f> *trainPoints = nullptr);
1012
1024 void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1025 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = nullptr);
1026
1038 void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1039 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = nullptr);
1040
1052 void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
1053 std::vector<cv::Point3f> *trainPoints = nullptr);
1054
1065 {
1066 if (!m_computeCovariance) {
1067 std::cout << "Warning : The covariance matrix has not been computed. "
1068 << "See setCovarianceComputation() to do it."
1069 << std::endl;
1070 return vpMatrix();
1071 }
1072
1073 if (m_computeCovariance && !m_useRansacVVS) {
1074 std::cout << "Warning : The covariance matrix can only be computed "
1075 << "with a Virtual Visual Servoing approach." << std::endl
1076 << "Use setUseRansacVVS(true) to choose to use a pose "
1077 << "estimation method based on a Virtual Visual Servoing approach." << std::endl;
1078 return vpMatrix();
1079 }
1080
1081 return m_covarianceMatrix;
1082 }
1083
//! Elapsed time of the last keypoint detection step (presumably in ms — TODO confirm against the timing code in the .cpp).
1089 inline double getDetectionTime() const { return m_detectionTime; }
1090
1098 inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
1099 {
1100 std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
1101 if (it_name == m_mapOfDetectorNames.end()) {
1102 std::cerr << "Internal problem with the feature type and the corresponding name!" << std::endl;
1103 }
1104
1105 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
1106 m_detectors.find(it_name->second);
1107 if (findDetector != m_detectors.end()) {
1108 return findDetector->second;
1109 }
1110
1111 std::cerr << "Cannot find: " << it_name->second << std::endl;
1112 return cv::Ptr<cv::FeatureDetector>();
1113 }
1114
1122 inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
1123 {
1124 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
1125 if (findDetector != m_detectors.end()) {
1126 return findDetector->second;
1127 }
1128
1129 std::cerr << "Cannot find: " << name << std::endl;
1130 return cv::Ptr<cv::FeatureDetector>();
1131 }
1132
//! Return (by copy) the mapping from detector type to detector name.
1136 inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
1137
//! Elapsed time of the last descriptor extraction step (presumably in ms — TODO confirm).
1143 inline double getExtractionTime() const { return m_extractionTime; }
1144
1152 inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
1153 {
1154 std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
1155 if (it_name == m_mapOfDescriptorNames.end()) {
1156 std::cerr << "Internal problem with the feature type and the corresponding name!" << std::endl;
1157 }
1158
1159 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
1160 m_extractors.find(it_name->second);
1161 if (findExtractor != m_extractors.end()) {
1162 return findExtractor->second;
1163 }
1164
1165 std::cerr << "Cannot find: " << it_name->second << std::endl;
1166 return cv::Ptr<cv::DescriptorExtractor>();
1167 }
1168
1176 inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
1177 {
1178 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
1179 if (findExtractor != m_extractors.end()) {
1180 return findExtractor->second;
1181 }
1182
1183 std::cerr << "Cannot find: " << name << std::endl;
1184 return cv::Ptr<cv::DescriptorExtractor>();
1185 }
1186
//! Return (by copy) the mapping from descriptor type to extractor name.
1190 inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
1191
//! Image format used when saving training images in the learning data.
1197 inline vpImageFormatType getImageFormat() const { return m_imageFormat; }
1198
//! Elapsed time of the last descriptor matching step (presumably in ms — TODO confirm).
1204 inline double getMatchingTime() const { return m_matchingTime; }
1205
//! The underlying OpenCV descriptor matcher.
1211 inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
1212
//! Return (by copy) the matches remaining after filtering.
1219 inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
1220
1228 inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
1229 {
1230 std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
1231 for (size_t i = 0; i < m_filteredMatches.size(); i++) {
1232 matchQueryToTrainKeyPoints.push_back(
1233 std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[static_cast<size_t>(m_filteredMatches[i].queryIdx)],
1234 m_trainKeyPoints[static_cast<size_t>(m_filteredMatches[i].trainIdx)]));
1235 }
1236 return matchQueryToTrainKeyPoints;
1237 }
1238
//! Number of train images stored in the learning data.
1244 inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }
1245
//! Get the 3D object points (OpenCV type) used in the last pose estimation.
1253 void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
1254
//! Get the 3D object points (ViSP type) used in the last pose estimation.
1262 void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
1263
//! Elapsed time of the last pose estimation step (presumably in ms — TODO confirm).
1269 inline double getPoseTime() const { return m_poseTime; }
1270
//! Descriptors computed on the current (query) image.
1277 inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }
1278
//! Get the query keypoints (OpenCV type); `matches` presumably selects filtered-only keypoints — TODO confirm in the .cpp.
1287 void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
1288
//! Get the query keypoints (ViSP type); see overload above for the `matches` flag.
1297 void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;
1298
//! Inlier image points retained by the last Ransac pose estimation.
1304 inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }
1305
//! Outlier image points rejected by the last Ransac pose estimation.
1311 inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }
1312
//! Descriptors computed on the train (reference) images.
1319 inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }
1320
//! Get the train keypoints (OpenCV type).
1326 void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
1327
//! Get the train keypoints (ViSP type).
1333 void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
1334
//! Get the 3D train points (OpenCV type) associated to the train keypoints.
1341 void getTrainPoints(std::vector<cv::Point3f> &points) const;
1342
//! Get the 3D train points (ViSP type) associated to the train keypoints.
1349 void getTrainPoints(std::vector<vpPoint> &points) const;
1350
1356 void initMatcher(const std::string &matcherName);
1357
1366 void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
1367 vpImage<unsigned char> &IMatching);
1368
1376 void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
1377
1386 void insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
1387
1395 void insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
1396
1402 void loadConfigFile(const std::string &configFile);
1403
1412 void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);
1413
1422 void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
1423 double &elapsedTime);
1424
1432 unsigned int matchPoint(const vpImage<unsigned char> &I) VP_OVERRIDE;
1433
1444 unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
1445 unsigned int width) VP_OVERRIDE;
1446
1455 unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle) VP_OVERRIDE;
1456
1465 unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
1466
1480 bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect());
1481
1498 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr,
1499 const vpRect &rectangle = vpRect());
1500
1521 bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
1522 const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = nullptr,
1523 std::vector<vpImagePoint> *imPts2 = nullptr, double *meanDescriptorDistance = nullptr,
1524 double *detectionScore = nullptr, const vpRect &rectangle = vpRect());
1525
1545 bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
1546 double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
1547 bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect());
1548
1556 unsigned int matchPoint(const vpImage<vpRGBa> &I_color);
1557
1568 unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
1569 unsigned int width);
1570
1579 unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
1580
1593 bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
1594 bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect());
1595
1611 bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
1612 double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr,
1613 const vpRect &rectangle = vpRect());
1614
1618 void reset();
1619
1628 void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true);
1629
1636 inline void setCovarianceComputation(const bool &flag)
1637 {
1638 m_computeCovariance = flag;
1639 if (!m_useRansacVVS) {
1640 std::cout << "Warning : The covariance matrix can only be computed "
1641 << "with a Virtual Visual Servoing approach." << std::endl
1642 << "Use setUseRansacVVS(true) to choose to use a pose "
1643 << "estimation method based on a Virtual "
1644 << "Visual Servoing approach." << std::endl;
1645 }
1646 }
1647
//! Set the method used to decide if an object is detected (see vpDetectionMethodType).
1653 inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }
1654
1660 inline void setDetector(const vpFeatureDetectorType &detectorType)
1661 {
1662 m_detectorNames.clear();
1663 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
1664 m_detectors.clear();
1665 initDetector(m_mapOfDetectorNames[detectorType]);
1666 }
1667
1673 inline void setDetector(const std::string &detectorName)
1674 {
1675 m_detectorNames.clear();
1676 m_detectorNames.push_back(detectorName);
1677 m_detectors.clear();
1678 initDetector(detectorName);
1679 }
1680
// Only compiled for OpenCV 2.4.x: the generic cv::Algorithm::set() parameter
// API used below was removed in OpenCV 3 — presumably why this is guarded; confirm against OpenCV changelog.
1681#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
// Set a named parameter on one of the initialized detectors.
// Silently does nothing when no detector with this name exists.
1690 template <typename T1, typename T2, typename T3>
1691 inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
1692 {
1693 if (m_detectors.find(detectorName) != m_detectors.end()) {
1694 m_detectors[detectorName]->set(parameterName, value);
1695 }
1696 }
1697#endif
1698
1705 inline void setDetectors(const std::vector<std::string> &detectorNames)
1706 {
1707 m_detectorNames.clear();
1708 m_detectors.clear();
1709 m_detectorNames = detectorNames;
1710 initDetectors(m_detectorNames);
1711 }
1712
1718 inline void setExtractor(const vpFeatureDescriptorType &extractorType)
1719 {
1720 m_extractorNames.clear();
1721 m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
1722 m_extractors.clear();
1723 initExtractor(m_mapOfDescriptorNames[extractorType]);
1724 }
1725
1732 inline void setExtractor(const std::string &extractorName)
1733 {
1734 m_extractorNames.clear();
1735 m_extractorNames.push_back(extractorName);
1736 m_extractors.clear();
1737 initExtractor(extractorName);
1738 }
1739
// Only compiled for OpenCV 2.4.x: the generic cv::Algorithm::set() parameter
// API used below was removed in OpenCV 3 — presumably why this is guarded; confirm against OpenCV changelog.
1740#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
// Set a named parameter on one of the initialized extractors.
// Silently does nothing when no extractor with this name exists.
1749 template <typename T1, typename T2, typename T3>
1750 inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
1751 {
1752 if (m_extractors.find(extractorName) != m_extractors.end()) {
1753 m_extractors[extractorName]->set(parameterName, value);
1754 }
1755 }
1756#endif
1757
1764 inline void setExtractors(const std::vector<std::string> &extractorNames)
1765 {
1766 m_extractorNames.clear();
1767 m_extractorNames = extractorNames;
1768 m_extractors.clear();
1769 initExtractors(m_extractorNames);
1770 }
1771
//! Set the image format used when saving training images in the learning data.
1777 inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }
1778
1794 inline void setMatcher(const std::string &matcherName)
1795 {
1796 m_matcherName = matcherName;
1797 initMatcher(m_matcherName);
1798 }
1799
//! Set the maximum number of features to keep (stored in m_maxFeatures; negative presumably means unlimited — TODO confirm against usage in the .cpp).
1805 void setMaxFeatures(int maxFeatures) { m_maxFeatures = maxFeatures; }
1806
// Set the match-filtering strategy. Ratio-based filters need the two best
// matches per keypoint (knn); other filters use a single match and, on
// OpenCV 2.4.x BruteForce matchers, may re-enable crossCheck.
1822 inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
1823 {
1824 m_filterType = filterType;
1825
1826 // Use k-nearest neighbors (knn) to retrieve the two best matches for a
1827 // keypoint, so this is useful only for the ratio-based filtering methods
1828 if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
1829 m_useKnn = true;
1830
1831#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1832 if (m_matcher != nullptr && m_matcherName == "BruteForce") {
1833 // if a matcher is already initialized, disable the crossCheck
1834 // because it will not work with knnMatch
1835 m_matcher->set("crossCheck", false);
1836 }
1837#endif
1838 }
1839 else {
1840 m_useKnn = false;
1841
1842#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1843 if (m_matcher != nullptr && m_matcherName == "BruteForce") {
1844 // if a matcher is already initialized, set the crossCheck mode if
1845 // necessary
1846 m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
1847 }
1848#endif
1849 }
1850 }
1851
1858 inline void setMatchingFactorThreshold(const double factor)
1859 {
1860 if (factor > 0.0) {
1861 m_matchingFactorThreshold = factor;
1862 }
1863 else {
1864 throw vpException(vpException::badValue, "The factor must be positive.");
1865 }
1866 }
1867
1873 inline void setMatchingRatioThreshold(double ratio)
1874 {
1875 if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
1876 m_matchingRatioThreshold = ratio;
1877 }
1878 else {
1879 throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
1880 }
1881 }
1882
1889 inline void setRansacConsensusPercentage(double percentage)
1890 {
1891 if (percentage > 0.0 &&
1892 (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1893 m_ransacConsensusPercentage = percentage;
1894 }
1895 else {
1896 throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
1897 }
1898 }
1899
//! Set the flag controlling how vpPose filters Ransac degenerate configurations.
1903 inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag) { m_ransacFilterFlag = flag; }
1904
1911 inline void setRansacIteration(int nbIter)
1912 {
1913 if (nbIter > 0) {
1914 m_nbRansacIterations = nbIter;
1915 }
1916 else {
1917 throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
1918 }
1919 }
1920
//! Enable/disable the multithreaded Ransac pose estimation.
1926 inline void setRansacParallel(bool parallel) { m_ransacParallel = parallel; }
1927
//! Set the number of threads for parallel Ransac (presumably 0 lets the implementation choose — TODO confirm in vpPose).
1934 inline void setRansacParallelNbThreads(unsigned int nthreads) { m_ransacParallelNbThreads = nthreads; }
1935
1943 inline void setRansacReprojectionError(double reprojectionError)
1944 {
1945 if (reprojectionError > 0.0) {
1946 m_ransacReprojectionError = reprojectionError;
1947 }
1948 else {
1949 throw vpException(vpException::badValue, "The Ransac reprojection "
1950 "threshold must be positive "
1951 "as we deal with distance.");
1952 }
1953 }
1954
1960 inline void setRansacMinInlierCount(int minCount)
1961 {
1962 if (minCount > 0) {
1963 m_nbRansacMinInlierCount = minCount;
1964 }
1965 else {
1966 throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
1967 }
1968 }
1969
1976 inline void setRansacThreshold(double threshold)
1977 {
1978 if (threshold > 0.0) {
1979 m_ransacThreshold = threshold;
1980 }
1981 else {
1982 throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
1983 }
1984 }
1985
//! Enable/disable the affine simulation (ASIFT-style) detection/extraction path.
1993 inline void setUseAffineDetection(bool useAffine) { m_useAffineDetection = useAffine; }
1994
// Only compiled for OpenCV 2.4.x: relies on the generic cv::Algorithm::set()
// API removed in OpenCV 3.
1995#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
// Enable/disable the crossCheck option of an already-initialized BruteForce
// matcher. Incompatible with knn matching (used by the ratio filters), in
// which case only a warning is printed.
2002 inline void setUseBruteForceCrossCheck(bool useCrossCheck)
2003 {
2004 // Only available with BruteForce and with k=1 (i.e not used with a
2005 // ratioDistanceThreshold method)
2006 if (m_matcher != nullptr && !m_useKnn && m_matcherName == "BruteForce") {
2007 m_matcher->set("crossCheck", useCrossCheck);
2008 }
2009 else if (m_matcher != nullptr && m_useKnn && m_matcherName == "BruteForce") {
2010 std::cout << "Warning, you try to set the crossCheck parameter with a "
2011 << "BruteForce matcher but knn is enabled"
2012 << " (the filtering method uses a ratio constraint)" << std::endl;
2013 }
2014 }
2015#endif
2016
//! Choose the matching direction: train-to-query instead of query-to-train.
2023 inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery) { m_useMatchTrainToQuery = useMatchTrainToQuery; }
2024
//! Enable/disable deriving the Ransac minimum inlier count from a consensus percentage.
2032 inline void setUseRansacConsensusPercentage(bool usePercentage) { m_useConsensusPercentage = usePercentage; }
2033
//! Choose the Ransac pose method: ViSP Virtual Visual Servoing (true) or the OpenCV one (false — TODO confirm the false branch in the .cpp).
2041 inline void setUseRansacVVS(bool ransacVVS) { m_useRansacVVS = ransacVVS; }
2042
//! Enable/disable the filter keeping only one (best) match per train descriptor.
2049 inline void setUseSingleMatchFilter(bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }
2050
private:
  //! Flag to enable the computation of the covariance matrix of the estimated pose.
  bool m_computeCovariance;
  //! Covariance matrix of the estimated pose (filled when m_computeCovariance is true).
  vpMatrix m_covarianceMatrix;
  //! Id of the current image (key into m_mapOfImages / m_mapOfImageId).
  int m_currentImageId;
  //! Method used to decide whether the object is detected in the image.
  vpDetectionMethodType m_detectionMethod;
  //! Detection score (NOTE(review): exact semantics depend on m_detectionMethod — confirm).
  double m_detectionScore;
  //! Threshold used by the detection method.
  double m_detectionThreshold;
  //! Elapsed time of the keypoint detection step.
  double m_detectionTime;
  //! Names of the keypoint detectors to use.
  std::vector<std::string> m_detectorNames;
  // Map of smart pointers to the keypoint detectors,
  // with a key based upon the detector name.
  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
  //! Elapsed time of the descriptor extraction step.
  double m_extractionTime;
  //! Names of the descriptor extractors to use.
  std::vector<std::string> m_extractorNames;
  // Map of smart pointers to the descriptor extractors,
  // with a key based upon the extractor name.
  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
  //! Matches remaining after the filtering step.
  std::vector<cv::DMatch> m_filteredMatches;
  //! Type of filtering applied to the raw matches.
  vpFilterMatchingType m_filterType;
  //! Image format (presumably used when saving the learning data — confirm).
  vpImageFormatType m_imageFormat;
  //! k-nearest-neighbour matches (used when m_useKnn is true).
  std::vector<std::vector<cv::DMatch> > m_knnMatches;
  //! Map from descriptor type enum to the corresponding OpenCV name.
  std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
  //! Map from detector type enum to the corresponding OpenCV name.
  std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
  //! Map of image ids (NOTE(review): looks like keypoint-index -> image-id — confirm against buildReference()).
  std::map<int, int> m_mapOfImageId;
  //! Map of the training images, indexed by image id.
  std::map<int, vpImage<unsigned char> > m_mapOfImages;
  //! Smart pointer to the descriptor matcher.
  cv::Ptr<cv::DescriptorMatcher> m_matcher;
  //! Name of the matcher (e.g. "BruteForce", "FlannBased").
  std::string m_matcherName;
  //! Raw matches between query and train descriptors.
  std::vector<cv::DMatch> m_matches;
  //! Factor for the constant-factor distance threshold filtering.
  double m_matchingFactorThreshold;
  //! Ratio used by the ratio distance threshold filtering.
  double m_matchingRatioThreshold;
  //! Elapsed time of the matching step.
  double m_matchingTime;
  //! Pairs (query keypoint, train 3D point) kept for the RANSAC pose estimation.
  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
  //! Maximum number of RANSAC iterations.
  int m_nbRansacIterations;
  //! Minimum number of inliers required by RANSAC.
  int m_nbRansacMinInlierCount;
  //! 3D object points corresponding to the filtered matches.
  std::vector<cv::Point3f> m_objectFilteredPoints;
  //! Elapsed time of the pose estimation step.
  double m_poseTime;
  //! Descriptors extracted from the query (current) image.
  cv::Mat m_queryDescriptors;
  //! Query keypoints remaining after the match filtering.
  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
  //! Keypoints detected in the query (current) image.
  std::vector<cv::KeyPoint> m_queryKeyPoints;
  //! Percentage of inliers required when m_useConsensusPercentage is true.
  double m_ransacConsensusPercentage;
  //! Filtering flag forwarded to the vpPose RANSAC implementation.
  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
  //! Image points classified as inliers by RANSAC.
  std::vector<vpImagePoint> m_ransacInliers;
  //! Image points classified as outliers by RANSAC.
  std::vector<vpImagePoint> m_ransacOutliers;
  //! Flag to run the RANSAC pose estimation in parallel.
  bool m_ransacParallel;
  //! Number of threads for the parallel RANSAC (0 presumably lets the implementation decide — confirm).
  unsigned int m_ransacParallelNbThreads;
  //! Maximum reprojection error to consider a point as a RANSAC inlier.
  double m_ransacReprojectionError;
  //! RANSAC threshold (NOTE(review): relation to m_ransacReprojectionError unclear from here — confirm).
  double m_ransacThreshold;
  // Descriptors computed on the training keypoints (i.e. keypoints
  // detected in the train images).
  cv::Mat m_trainDescriptors;
  //! Keypoints detected in the train images.
  std::vector<cv::KeyPoint> m_trainKeyPoints;
  //! 3D points corresponding to the train keypoints.
  std::vector<cv::Point3f> m_trainPoints;
  //! The same 3D train points stored as vpPoint.
  std::vector<vpPoint> m_trainVpPoints;
  //! Flag to simulate affine viewpoints during detection (ASIFT-like, see affineSkew()).
  bool m_useAffineDetection;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  //! Flag to enable the BruteForce matcher cross-check (OpenCV 2.4.x only).
  bool m_useBruteForceCrossCheck;
#endif
  //! Flag to interpret the required RANSAC inliers as a percentage.
  bool m_useConsensusPercentage;
  //! Flag to use k-nearest-neighbour matching instead of plain matching.
  bool m_useKnn;
  //! Flag to match from train to query instead of from query to train.
  bool m_useMatchTrainToQuery;
  //! Flag to use the Virtual Visual Servoing RANSAC pose estimation variant.
  bool m_useRansacVVS;
  //! Flag to apply the additional single-match filtering step.
  bool m_useSingleMatchFilter;
  //! Maximum number of features (presumably forwarded to detectors that support it — confirm).
  int m_maxFeatures;
2192
  //! Warp \e img with a simulated affine viewpoint change (tilt + rotation
  //! \e phi), producing the warped image, its validity mask and the inverse
  //! affine transform \e Ai (ASIFT-style helper used when
  //! m_useAffineDetection is true — confirm exact convention in the .cpp).
  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);

  //! Compute an error measure between the matched 2D keypoints and the
  //! projection of their associated 3D points under the estimated pose
  //! \e cMo_est, using the camera parameters \e cam.
  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                    const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);

  //! Filter the raw matches according to m_filterType and fill the
  //! filtered containers (m_filteredMatches, m_queryFilteredKeyPoints, ...).
  void filterMatches();

  //! Initialize the members to their default values (overrides the base class init).
  void init() VP_OVERRIDE;

  //! Initialize one keypoint detector from its name and store it in m_detectors.
  void initDetector(const std::string &detectorNames);

  //! Initialize the whole list of keypoint detectors.
  void initDetectors(const std::vector<std::string> &detectorNames);

  //! Initialize one descriptor extractor from its name and store it in m_extractors.
  void initExtractor(const std::string &extractorName);

  //! Initialize the whole list of descriptor extractors.
  void initExtractors(const std::vector<std::string> &extractorNames);

  //! Fill m_mapOfDetectorNames and m_mapOfDescriptorNames with the
  //! enum-to-OpenCV-name correspondences.
  void initFeatureNames();
2266 inline size_t myKeypointHash(const cv::KeyPoint &kp)
2267 {
2268 size_t _val = 2166136261U, scale = 16777619U;
2269 Cv32suf u;
2270 u.f = kp.pt.x;
2271 _val = (scale * _val) ^ u.u;
2272 u.f = kp.pt.y;
2273 _val = (scale * _val) ^ u.u;
2274 u.f = kp.size;
2275 _val = (scale * _val) ^ u.u;
2276 // As the keypoint angle can be computed for certain type of keypoint only
2277 // when extracting the corresponding descriptor, the angle field is not
2278 // taking into account for the hash
2279 // u.f = kp.angle; _val = (scale * _val) ^ u.u;
2280 u.f = kp.response;
2281 _val = (scale * _val) ^ u.u;
2282 _val = (scale * _val) ^ (static_cast<size_t>(kp.octave));
2283 _val = (scale * _val) ^ (static_cast<size_t>(kp.class_id));
2284 return _val;
2285 }
2286
2287#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  /*
   * Adapts a detector to detect points over multiple levels of a Gaussian
   * pyramid. Useful for detectors that are not inherently scale-invariant.
   * Ported from the OpenCV 2.4.11 source code (only compiled for
   * OpenCV >= 3.0, where the original adapter class was removed).
   */
  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
  {
  public:
    // detector - the wrapped single-scale detector, applied at each level
    // maxLevel - The 0-based index of the last pyramid layer
    PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);

    // TODO implement read/write
    virtual bool empty() const VP_OVERRIDE;

  protected:
    // Run the wrapped detector on every pyramid level and merge the keypoints.
    virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
                        cv::InputArray mask = cv::noArray()) VP_OVERRIDE;
    virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                            const cv::Mat &mask = cv::Mat()) const;

    // Wrapped single-scale detector.
    cv::Ptr<cv::FeatureDetector> m_detector;
    // 0-based index of the last pyramid level to process.
    int m_maxLevel;
  };
2311
  /*
   * A class that filters a vector of keypoints.
   * Because it is difficult to provide a convenient interface for all usage
   * scenarios of a keypoints filter, it only exposes the few static methods
   * needed here.
   */
  class KeyPointsFilter
  {
  public:
    KeyPointsFilter() { }

    /*
     * Remove keypoints within borderSize pixels of an image edge.
     */
    static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
    /*
     * Remove keypoints whose size is outside the range [minSize, maxSize].
     */
    static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
    /*
     * Remove keypoints located on zero pixels of the given mask.
     */
    static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);

    /*
     * Retain the specified number of the best keypoints (according to the
     * response).
     */
    static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
  };
2346
2347#endif
2348};
2349END_VISP_NAMESPACE
2350#endif
2351#endif
virtual unsigned int buildReference(const vpImage< unsigned char > &I)=0
virtual unsigned int matchPoint(const vpImage< unsigned char > &I)=0
virtual void display(const vpImage< unsigned char > &Iref, const vpImage< unsigned char > &Icurrent, unsigned int size=3)=0
Generic class defining intrinsic camera parameters.
Class to define RGB colors available for display functionalities.
Definition vpColor.h:157
error that can be emitted by ViSP classes.
Definition vpException.h:60
@ badValue
Used to indicate that a value is not in the allowed range.
Definition vpException.h:73
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:131
Class that allows keypoints 2D features detection (and descriptors extraction) and matching thanks to...
Definition vpKeyPoint.h:274
double getDetectionTime() const
std::vector< vpImagePoint > getRansacInliers() const
void setMatchingFactorThreshold(const double factor)
cv::Ptr< cv::DescriptorMatcher > getMatcher() const
void setRansacConsensusPercentage(double percentage)
@ detectionThreshold
Definition vpKeyPoint.h:294
void setRansacParallel(bool parallel)
void setRansacReprojectionError(double reprojectionError)
void setExtractor(const std::string &extractorName)
void setUseSingleMatchFilter(bool singleMatchFilter)
void setFilterMatchingType(const vpFilterMatchingType &filterType)
void initMatcher(const std::string &matcherName)
void setRansacParallelNbThreads(unsigned int nthreads)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
double getExtractionTime() const
void setUseRansacVVS(bool ransacVVS)
void setDetectors(const std::vector< std::string > &detectorNames)
void setExtractors(const std::vector< std::string > &extractorNames)
cv::Mat getTrainDescriptors() const
@ DETECTOR_KAZE
KAZE detector.
Definition vpKeyPoint.h:344
@ DETECTOR_BRISK
BRISK detector.
Definition vpKeyPoint.h:335
@ DETECTOR_AKAZE
AKAZE detector.
Definition vpKeyPoint.h:343
@ DETECTOR_MSER
MSER detector.
Definition vpKeyPoint.h:338
@ DETECTOR_TYPE_SIZE
Number of detectors available.
Definition vpKeyPoint.h:361
@ DETECTOR_AGAST
AGAST detector.
Definition vpKeyPoint.h:342
@ DETECTOR_FAST
FAST detector.
Definition vpKeyPoint.h:336
@ DETECTOR_GFTT
GFTT detector.
Definition vpKeyPoint.h:337
@ DETECTOR_ORB
ORB detector.
Definition vpKeyPoint.h:339
@ DETECTOR_SimpleBlob
SimpleBlob detector.
Definition vpKeyPoint.h:340
void setExtractor(const vpFeatureDescriptorType &extractorType)
void setImageFormat(const vpImageFormatType &imageFormat)
cv::Ptr< cv::DescriptorExtractor > getExtractor(const vpFeatureDescriptorType &type) const
std::vector< vpImagePoint > getRansacOutliers() const
void setRansacThreshold(double threshold)
void setRansacMinInlierCount(int minCount)
void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
double getPoseTime() const
unsigned int getNbImages() const
double getMatchingTime() const
vpFeatureDescriptorType
Definition vpKeyPoint.h:366
@ DESCRIPTOR_LATCH
LATCH descriptor.
Definition vpKeyPoint.h:399
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
Definition vpKeyPoint.h:391
@ DESCRIPTOR_BRIEF
BRIEF descriptor.
Definition vpKeyPoint.h:396
@ DESCRIPTOR_TYPE_SIZE
Number of descriptors available.
Definition vpKeyPoint.h:413
@ DESCRIPTOR_FREAK
FREAK descriptor.
Definition vpKeyPoint.h:398
@ DESCRIPTOR_ORB
ORB descriptor.
Definition vpKeyPoint.h:389
@ DESCRIPTOR_KAZE
KAZE descriptor.
Definition vpKeyPoint.h:392
@ DESCRIPTOR_DAISY
DAISY descriptor.
Definition vpKeyPoint.h:397
@ DESCRIPTOR_BRISK
BRISK descriptor.
Definition vpKeyPoint.h:388
cv::Ptr< cv::DescriptorExtractor > getExtractor(const std::string &name) const
void setMatcher(const std::string &matcherName)
vpImageFormatType getImageFormat() const
std::map< vpFeatureDescriptorType, std::string > getExtractorNames() const
void setUseAffineDetection(bool useAffine)
void setUseRansacConsensusPercentage(bool usePercentage)
void setMatchingRatioThreshold(double ratio)
std::map< vpFeatureDetectorType, std::string > getDetectorNames() const
@ stdAndRatioDistanceThreshold
Definition vpKeyPoint.h:286
@ constantFactorDistanceThreshold
Definition vpKeyPoint.h:279
@ ratioDistanceThreshold
Definition vpKeyPoint.h:283
@ stdDistanceThreshold
Definition vpKeyPoint.h:281
void setCovarianceComputation(const bool &flag)
void setDetector(const vpFeatureDetectorType &detectorType)
std::vector< cv::DMatch > getMatches() const
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
std::vector< std::pair< cv::KeyPoint, cv::KeyPoint > > getMatchQueryToTrainKeyPoints() const
void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
vpMatrix getCovarianceMatrix() const
cv::Ptr< cv::FeatureDetector > getDetector(const std::string &name) const
void setDetectionMethod(const vpDetectionMethodType &method)
void setDetector(const std::string &detectorName)
void setMaxFeatures(int maxFeatures)
void setRansacIteration(int nbIter)
cv::Mat getQueryDescriptors() const
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition vpPoint.h:79
RANSAC_FILTER_FLAGS
Definition vpPose.h:112
Defines a rectangle in the plane.
Definition vpRect.h:79