查阅资料得知,经过 SIFT 匹配之后,特征点类定义如下所示:
class KeyPoint
{ Point2f pt; //坐标
float size; //特征点邻域直径
float angle; //特征点的方向,值为[0, 360),负值表示不使用
float response;
int octave; //特征点所在的图像金字塔的组
int class_id; //用于聚类的id
}
opencv-python中也一样
# coding=utf-8
"""
@project    : binocular_vision
@author     : Hoodie_Willi
@description: print the coordinates of matched keypoint pairs after SIFT matching
@time       : 2019-05-28 10:25:36
"""
import numpy as np
import cv2

# SIFT detector/descriptor. NOTE(review): lives in xfeatures2d only for
# opencv-contrib builds <= 4.3; newer OpenCV exposes it as cv2.SIFT_create().
sift = cv2.xfeatures2d.SIFT_create()

# BUG FIX: imread()'s second argument must be an IMREAD_* flag, not a color
# conversion code. cv2.COLOR_BGR2GRAY (== 6) is not a valid read mode;
# cv2.IMREAD_GRAYSCALE is the correct way to load the images as grayscale.
img1 = cv2.imread("./img/l/left.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("./img/r/right.jpg", cv2.IMREAD_GRAYSCALE)

# Detect keypoints and compute their SIFT descriptors for both images.
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# Brute-force matcher: for each descriptor of img1, keep the 2 nearest
# descriptors of img2 so Lowe's ratio test can be applied below.
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

## Ratio test
print(len(matches))
matchesMask = [[0, 0] for _ in range(len(matches))]
for i, (m1, m2) in enumerate(matches):
    # Lowe's ratio test: accept the match only when the best candidate is
    # clearly closer (smaller Euclidean descriptor distance) than the 2nd best.
    if m1.distance < 0.7 * m2.distance:
        matchesMask[i] = [1, 0]
        # queryIdx indexes the keypoints of the FIRST (query) image,
        # trainIdx indexes the keypoints of the SECOND (train) image.
        # (The original comments had the two explanations swapped.)
        pt1 = kp1[m1.queryIdx].pt
        pt2 = kp2[m1.trainIdx].pt
        print(i, pt1, pt2)
        # Mark every 5th accepted match on both images for a quick visual check.
        if i % 5 == 0:
            cv2.circle(img1, (int(pt1[0]), int(pt1[1])), 5, (255, 0, 255), -1)
            cv2.circle(img2, (int(pt2[0]), int(pt2[1])), 5, (255, 0, 255), -1)

# Accepted matches are drawn in blue; unmatched single keypoints in red.
draw_params = dict(matchColor=(255, 0, 0),
                   singlePointColor=(0, 0, 255),
                   matchesMask=matchesMask,
                   flags=0)
res = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
cv2.imshow("Result", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
匹配结果如图:
坐标如图所示