import cv2
import numpy as np
# Load the two images
image1 = cv2.imread("T1.jpg")
image2 = cv2.imread("T2.jpg")
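# Added safeguard (not in the original script): cv2.imread returns None for a
# missing or unreadable file, which would otherwise only surface later as a
# cvtColor error. The filenames above are taken as-is from the script.
if image1 is None or image2 is None:
    raise FileNotFoundError("Could not read T1.jpg and/or T2.jpg")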
# Convert images to grayscale
gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
# Initialize the SIFT detector (part of the main OpenCV package since 4.4.0)
sift = cv2.SIFT_create()
# Find keypoints and descriptors
keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
# Initialize a brute-force descriptor matcher (default L2 norm, suitable for SIFT's float descriptors)
matcher = cv2.BFMatcher()
# Match descriptors
matches = matcher.match(descriptors1, descriptors2)
# Sort matches by distance
matches = sorted(matches, key=lambda x: x.distance)
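# Optional refinement (illustrative, not part of the original script): keep only
# the strongest matches before estimating the homography to limit the influence
# of outliers; the cutoff of 100 is an arbitrary example value.
# matches = matches[:100]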
# Extract matched keypoints
points1 = np.float32([keypoints1[m.queryIdx].pt for m in matches])
points2 = np.float32([keypoints2[m.trainIdx].pt for m in matches])
# Estimate transformation matrix (homography) using RANSAC
homography, _ = cv2.findHomography(points1, points2, cv2.RANSAC)
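# Note (added): the discarded second return value is the RANSAC inlier mask; it
# could be kept to check how many of the matches actually support the estimated
# homography before trusting the warp.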
# Warp image1 to align with image2
aligned_image1 = cv2.warpPerspective(
image1, homography, (image2.shape[1], image2.shape[0])
)
# Save the aligned image and display it for inspection
cv2.imwrite("T1p.jpg", aligned_image1)
cv2.imshow("Aligned T1", aligned_image1)
cv2.waitKey(0)
cv2.destroyAllWindows()
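# Optional sanity check (a minimal sketch, not in the original script): blend the
# warped image with the reference image so any misalignment shows up as ghosting.
# The output filename "overlay.jpg" is an illustrative choice.
overlay = cv2.addWeighted(aligned_image1, 0.5, image2, 0.5, 0)
cv2.imwrite("overlay.jpg", overlay)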