Feature point detection and matching: SURF
The following OpenCV 2.x example (using the nonfree module) detects SURF keypoints in two images, matches their descriptors with a FLANN-based matcher, filters the matches by distance, estimates a homography from the good matches, and draws the projected outline of the first image onto the combined match view.
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <sys/time.h>
#include <iostream>
using namespace std;
int main()
{
    cv::Mat srcImage1 = cv::imread("cloned1.png");
    cv::Mat srcImage2 = cv::imread("cloned2.png");
    if (srcImage1.empty() || srcImage2.empty())
    {
        cout << "Failed to load input images" << endl;
        return -1;
    }
    struct timeval t1, t2, c1;
    gettimeofday(&t1, NULL);
    int minHessian = 400;  // Hessian threshold used by the SURF detector
    cv::SurfFeatureDetector detector(minHessian);
    std::vector<cv::KeyPoint> keyPoints_object, keyPoints_scene;
    // Detect SURF keypoints in both images
    detector.detect(srcImage1, keyPoints_object);
    detector.detect(srcImage2, keyPoints_scene);
    gettimeofday(&c1, NULL);
    // Compute the SURF descriptors (feature vectors)
    cv::SurfDescriptorExtractor extractor;
    cv::Mat descriptor_object, descriptor_scene;
    extractor.compute(srcImage1, keyPoints_object, descriptor_object);
    extractor.compute(srcImage2, keyPoints_scene, descriptor_scene);
    // Match the descriptors with the FLANN-based matcher
    cv::FlannBasedMatcher matcher;
    std::vector<cv::DMatch> matches;
    matcher.match(descriptor_object, descriptor_scene, matches);
    double max_dist = 0; double min_dist = 100;  // maximum and minimum match distances
    // Find the largest and smallest distances between matched keypoints
    for (int i = 0; i < descriptor_object.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    //printf("> Max dist : %f \n", max_dist);
    //printf("> Min dist : %f \n", min_dist);
    // Keep only the matches whose distance is less than 3*min_dist
    std::vector<cv::DMatch> good_matches;
    for (int i = 0; i < descriptor_object.rows; ++i)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }
    // Draw the good matches
    cv::Mat img_matches;
    cv::drawMatches(srcImage1, keyPoints_object, srcImage2, keyPoints_scene,
                    good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
                    std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    // Collect the matched point coordinates from both images;
    // queryIdx indexes the first (object) image, trainIdx the second (scene) image
    std::vector<cv::Point2f> obj;
    std::vector<cv::Point2f> scene;
    for (unsigned int i = 0; i < good_matches.size(); ++i)
    {
        obj.push_back(keyPoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keyPoints_scene[good_matches[i].trainIdx].pt);
    }
    // Estimate the perspective transform (homography) from object to scene with RANSAC
    cv::Mat H = cv::findHomography(obj, scene, CV_RANSAC, 3, cv::noArray());
    // Corners of the object image
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = cv::Point2f(0, 0);
    obj_corners[1] = cv::Point2f(static_cast<float>(srcImage1.cols), 0);
    obj_corners[2] = cv::Point2f(static_cast<float>(srcImage1.cols), static_cast<float>(srcImage1.rows));
    obj_corners[3] = cv::Point2f(0, static_cast<float>(srcImage1.rows));
    std::vector<cv::Point2f> scene_corners(4);
    // Project the object corners into the scene image
    cv::perspectiveTransform(obj_corners, scene_corners, H);
    // Draw the projected object outline, shifted right by the width of the first image
    cv::Point2f offset(static_cast<float>(srcImage1.cols), 0.f);
    cv::line(img_matches, scene_corners[0] + offset, scene_corners[1] + offset, cv::Scalar(255, 0, 123), 4);
    cv::line(img_matches, scene_corners[1] + offset, scene_corners[2] + offset, cv::Scalar(255, 0, 123), 4);
    cv::line(img_matches, scene_corners[2] + offset, scene_corners[3] + offset, cv::Scalar(255, 0, 123), 4);
    cv::line(img_matches, scene_corners[3] + offset, scene_corners[0] + offset, cv::Scalar(255, 0, 123), 4);
    gettimeofday(&t2, NULL);
    // Combine the seconds and microseconds fields to get the elapsed time in microseconds
    long total_us = (t2.tv_sec - t1.tv_sec) * 1000000L + (t2.tv_usec - t1.tv_usec);
    long after_detect_us = (t2.tv_sec - c1.tv_sec) * 1000000L + (t2.tv_usec - c1.tv_usec);
    cout << "Total time: " << total_us << " us" << endl;
    cout << "Time after keypoint detection: " << after_detect_us << " us" << endl;
imshow("Good Matches & Object detection",img_matches);
cv::waitKey(0);
return 0;
}
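Note that the opencv2/nonfree/nonfree.hpp header used above exists only in OpenCV 2.x; from OpenCV 3 onward, SURF lives in the xfeatures2d module of opencv_contrib. Below is a minimal sketch of the same detect, match, and homography pipeline for OpenCV 3.x/4.x, assuming opencv_contrib was built with the non-free algorithms enabled (OPENCV_ENABLE_NONFREE); the file names and Hessian threshold are simply carried over from the example above.
// Sketch only: SURF + FLANN + homography on OpenCV 3.x/4.x with xfeatures2d
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <algorithm>
#include <iostream>
#include <vector>
int main()
{
    cv::Mat img1 = cv::imread("cloned1.png");
    cv::Mat img2 = cv::imread("cloned2.png");
    if (img1.empty() || img2.empty()) return -1;
    // SURF detector/extractor with the same Hessian threshold as the listing above
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(400);
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    surf->detectAndCompute(img1, cv::noArray(), kp1, desc1);
    surf->detectAndCompute(img2, cv::noArray(), kp2, desc2);
    // FLANN matching followed by the same 3*min_dist filter
    cv::FlannBasedMatcher matcher;
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);
    double min_dist = 100;
    for (size_t i = 0; i < matches.size(); ++i)
        min_dist = std::min(min_dist, static_cast<double>(matches[i].distance));
    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < matches.size(); ++i)
        if (matches[i].distance < 3 * min_dist) good.push_back(matches[i]);
    // Homography from the filtered correspondences (cv::RANSAC replaces CV_RANSAC)
    std::vector<cv::Point2f> obj, scene;
    for (size_t i = 0; i < good.size(); ++i)
    {
        obj.push_back(kp1[good[i].queryIdx].pt);
        scene.push_back(kp2[good[i].trainIdx].pt);
    }
    if (obj.size() < 4) { std::cout << "Not enough matches for a homography" << std::endl; return -1; }
    cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC, 3);
    // Visualize the filtered matches
    cv::Mat vis;
    cv::drawMatches(img1, kp1, img2, kp2, good, vis);
    // Project the corners of img1 into img2 and draw the outline on the match view
    std::vector<cv::Point2f> obj_corners(4), scene_corners(4);
    obj_corners[0] = cv::Point2f(0, 0);
    obj_corners[1] = cv::Point2f(static_cast<float>(img1.cols), 0);
    obj_corners[2] = cv::Point2f(static_cast<float>(img1.cols), static_cast<float>(img1.rows));
    obj_corners[3] = cv::Point2f(0, static_cast<float>(img1.rows));
    cv::perspectiveTransform(obj_corners, scene_corners, H);
    cv::Point2f offset(static_cast<float>(img1.cols), 0.f);
    for (int k = 0; k < 4; ++k)
        cv::line(vis, scene_corners[k] + offset, scene_corners[(k + 1) % 4] + offset,
                 cv::Scalar(255, 0, 123), 4);
    cv::imshow("Good Matches (OpenCV 3+ / xfeatures2d)", vis);
    cv::waitKey(0);
    return 0;
}
Building this variant additionally requires linking against the opencv_xfeatures2d library.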