-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
executable file
·139 lines (116 loc) · 6.05 KB
/
main.py
File metadata and controls
executable file
·139 lines (116 loc) · 6.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
"""
Directory Structure:
--------------------
your_project/
├── images/
│ ├── left.jpg # Input left image.
│ └── right.jpg # Input right image.
├── features.py # Contains keypoint detectors, descriptors, and matchers.
├── transformation.py # Additional module
└── main.py # Main script: loads images, processes features, and saves results.
Output images (e.g., left_harris.jpg, right_harris.jpg, matches.jpg) need to be saved in the images/ folder.
Fixed Parameters:
-----------------
- HARRIS_THRESHOLD = 0.08 : Only keypoints with a Harris response greater than 0.08 will be kept.
- HARRIS_KERNEL = 7 : Use a 7-pixel kernel for non-maximum suppression.
- MATCH_RATIO_THRESH = 0.9 : Ratio threshold for feature matching.
- Drawing parameters:
- GREEN = (0, 255, 0) : Color for drawing.
- RADIUS = 5 : Radius for drawing circles at keypoint locations.
"""
import cv2
import numpy as np
from scipy import spatial
from features import HarrisKeypointDetector, MOPSFeatureDescriptor,ORBKeypointDetector, ORBFeatureMatcher, ORBFeatureDescriptor
# Your TunedHarrisDetector should extend HarrisKeypointDetector and apply non-max suppression using HARRIS_KERNEL.
class TunedHarrisDetector(HarrisKeypointDetector):
    """Harris keypoint detector that keeps only strong responses.

    Extends the base ``HarrisKeypointDetector`` by discarding every
    keypoint whose Harris response is <= ``HARRIS_THRESHOLD``.  The
    surviving keypoints are returned sorted by ascending response,
    matching the ordering of the original implementation.
    """

    # Minimum Harris response a keypoint must exceed to be kept.
    HARRIS_THRESHOLD = 0.08

    def detectKeypoints(self, img):
        """Detect keypoints in ``img`` and filter them by response.

        Parameters
        ----------
        img : numpy.ndarray
            Input image, passed straight through to the base detector.

        Returns
        -------
        list[cv2.KeyPoint]
            Keypoints with ``response > HARRIS_THRESHOLD``, sorted by
            ascending response.  Empty list when nothing qualifies.
        """
        keypoints = super().detectKeypoints(img)
        # Filter first, then sort ascending by response (the original
        # ordering).  This fixes two defects of the earlier version:
        #   * an empty detection no longer raises NameError on `j`;
        #   * when no keypoint exceeds the threshold, an empty list is
        #     returned instead of wrongly keeping the strongest point.
        strong = [kp for kp in keypoints if kp.response > self.HARRIS_THRESHOLD]
        strong.sort(key=lambda kp: kp.response)
        # NOTE(review): the spec asks for non-maximum suppression with a
        # 7-pixel kernel (HARRIS_KERNEL), but the original code never
        # implemented it and the exact suppression semantics are not
        # defined here -- TODO confirm and implement separately.
        return strong
# Your RatioTestMatcher should perform ratio test feature matching.
class RatioTestMatcher:
    """Match feature descriptors using Lowe's ratio test."""

    # Default ratio threshold: a match is accepted when
    # best_distance / second_best_distance < MATCH_RATIO_THRESH.
    MATCH_RATIO_THRESH = 0.9

    def matchFeatures(self, desc1, desc2, ratio_thresh=MATCH_RATIO_THRESH):
        """Match descriptors in ``desc1`` against ``desc2``.

        Parameters
        ----------
        desc1, desc2 : array-like, shape (N, D) and (M, D)
            Feature descriptor matrices for the query and train images.
        ratio_thresh : float
            Ratio-test threshold; smaller values are stricter.

        Returns
        -------
        list[cv2.DMatch]
            Accepted matches, sorted by ascending best-match distance.
        """
        desc1 = np.asarray(desc1)
        desc2 = np.asarray(desc2)
        # Nothing to match against: the original crashed here.
        if desc1.size == 0 or desc2.size == 0:
            return []
        # Euclidean distance matrix: distances[i, j] is the distance
        # between desc1[i] and desc2[j].
        distances = spatial.distance.cdist(desc1, desc2, 'euclidean')
        nearest_idx = np.argmin(distances, axis=1)   # best trainIdx per query
        row_sorted = np.sort(distances, axis=1)      # per-row ascending distances
        # Emit matches in order of increasing best distance, as before.
        order = np.argsort(row_sorted[:, 0])
        single_train = desc2.shape[0] < 2  # original raised IndexError here
        matches = []
        for qi in order:
            qi = int(qi)
            best = float(row_sorted[qi, 0])
            if single_train:
                # Only one candidate: no second-best distance exists, so
                # the ratio test cannot reject the match.
                matches.append(cv2.DMatch(qi, int(nearest_idx[qi]), best))
                continue
            second = row_sorted[qi, 1]
            # Guard the division explicitly (the original np.where form
            # still evaluated the division for every row, emitting a
            # divide-by-zero warning).  second == 0 implies best == 0,
            # i.e. a perfect match, so ratio 0 keeps it -- as before.
            ratio = best / second if second != 0 else 0.0
            if ratio < ratio_thresh:
                matches.append(cv2.DMatch(qi, int(nearest_idx[qi]), best))
        return matches
# Drawing functions: draw keypoints and match lines with circles.
def draw_harris_keypoints(image, keypoints, output_path):
    """Draw a green cross marker at every keypoint and save the result.

    The input image is not modified; drawing happens on a copy, which is
    then written to ``output_path``.
    """
    canvas = image.copy()
    for kp in keypoints:
        x, y = kp.pt
        cv2.drawMarker(canvas, (int(x), int(y)), color=(0, 255, 0))
    cv2.imwrite(output_path, canvas)
def draw_matches(img1, kp1, img2, kp2, matches, output_path, max_matches=50):
    """Visualize feature matches between two images placed side by side.

    Draws up to ``max_matches`` red lines connecting matched keypoints,
    with a green circle of fixed radius at each endpoint, then writes the
    combined image to ``output_path``.
    """
    GREEN = (0, 255, 0)
    RADIUS = 5
    canvas = np.hstack((img1, img2))
    offset = img1.shape[1]  # shift right-image x-coordinates by left width
    print("Total match amount:", len(matches))
    for m in matches[:max_matches]:
        x1, y1 = kp1[m.queryIdx].pt
        x2, y2 = kp2[m.trainIdx].pt
        p1 = (int(x1), int(y1))
        p2 = (int(x2 + offset), int(y2))
        cv2.line(canvas, p1, p2, color=(0, 0, 255))
        cv2.circle(canvas, p1, RADIUS, GREEN)
        cv2.circle(canvas, p2, RADIUS, GREEN)
    cv2.imwrite(output_path, canvas)
if __name__ == "__main__":
    # 1. Initialize the pipeline modules.
    THD = TunedHarrisDetector()     # image -> keypoints
    MOP = MOPSFeatureDescriptor()   # keypoints -> descriptors
    RTM = RatioTestMatcher()        # descriptors -> matches
    # 2. Load the input pair.  cv2.imread returns None (it does not
    #    raise) when a file is missing or unreadable, so fail fast with a
    #    clear message instead of crashing later inside the detector.
    imageL = cv2.imread("images/left.jpg")
    imageR = cv2.imread("images/right.jpg")
    if imageL is None or imageR is None:
        raise FileNotFoundError(
            "Could not read images/left.jpg and/or images/right.jpg")
    # 3. Detect keypoints, compute descriptors, and match features.
    keyL = THD.detectKeypoints(imageL)
    keyR = THD.detectKeypoints(imageR)
    descL = MOP.describeFeatures(imageL, keyL)
    descR = MOP.describeFeatures(imageR, keyR)
    match = RTM.matchFeatures(descL, descR)
    # 4. Draw and save the keypoint and match visualizations.
    draw_harris_keypoints(imageL, keyL, "images/left_harris.jpg")
    draw_harris_keypoints(imageR, keyR, "images/right_harris.jpg")
    draw_matches(imageL, keyL, imageR, keyR, match, "images/matches.jpg")