Diffstat (limited to 'opencv/code')
-rw-r--r--  opencv/code/a1.py    |  29
-rw-r--r--  opencv/code/a10.py   |  39
-rw-r--r--  opencv/code/a11.py   |  29
-rw-r--r--  opencv/code/a12.py   |  26
-rw-r--r--  opencv/code/a13.py   |   9
-rw-r--r--  opencv/code/a14.py   |  53
-rw-r--r--  opencv/code/a2.py    |  26
-rw-r--r--  opencv/code/a3.py    |  14
-rw-r--r--  opencv/code/a4.py    |  17
-rw-r--r--  opencv/code/a5.py    |  42
-rw-r--r--  opencv/code/a6.py    |  43
-rw-r--r--  opencv/code/a7.py    |  26
-rw-r--r--  opencv/code/a8.py    |  10
-rw-r--r--  opencv/code/a9.py    |  10
-rw-r--r--  opencv/code/align.py |  43
15 files changed, 416 insertions, 0 deletions
diff --git a/opencv/code/a1.py b/opencv/code/a1.py
new file mode 100644
index 0000000..1f00db7
--- /dev/null
+++ b/opencv/code/a1.py
@@ -0,0 +1,29 @@
+import cv2
+import numpy as np
+import os
+
+os.chdir("images")
+
+image = cv2.imread("1i.png")
+
+gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+
+_, graytobin = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
+
+_, rgbtobin = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY)
+
+hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
+
+hsvtorgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
+
+rcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
+
+rcbtorgb = cv2.cvtColor(rcb, cv2.COLOR_YCrCb2BGR)
+
+cv2.imwrite("1.1.png", gray)
+cv2.imwrite("1.2.png", hsv)
+cv2.imwrite("1.3.png", rcb)
+cv2.imwrite("1.4.png", hsvtorgb)
+cv2.imwrite("1.5.png", rcbtorgb)
+cv2.imwrite("1.6.png", graytobin)
+cv2.imwrite("1.7.png", rgbtobin)
diff --git a/opencv/code/a10.py b/opencv/code/a10.py
new file mode 100644
index 0000000..f54a024
--- /dev/null
+++ b/opencv/code/a10.py
@@ -0,0 +1,39 @@
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+
+images = [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in ["9i1.jpg", "9i2.jpg"]]
+assert all(
+    image.shape == images[0].shape for image in images
+), "Input images must have the same dimensions"
+
+operations = [
+    ("Addition", lambda x, y: cv2.add(x, y)),
+    ("Subtraction", lambda x, y: cv2.subtract(x, y)),
+    ("Multiplication", lambda x, y: cv2.multiply(x, y)),
+    (
+        "Division",
+        lambda x, y: cv2.divide(
+            x.astype(np.float32), np.where(y == 0, 1, y).astype(np.float32)
+        ),
+    ),
+]
+
+plt.figure(figsize=(8, 6))
+
+for i, image in enumerate(images, start=1):
+    plt.subplot(3, 2, i)
+    plt.imshow(image, cmap="gray")
+    plt.title(f"Image {i}")
+    plt.axis("off")
+
+for i, (operation_name, operation_function) in enumerate(
+    operations, start=len(images) + 1
+):
+    result_image = operation_function(*images)
+    plt.subplot(3, 2, i)
+    plt.imshow(result_image, cmap="gray")
+    plt.title(operation_name)
+    plt.axis("off")
+
+plt.savefig("10.svg")
diff --git a/opencv/code/a11.py b/opencv/code/a11.py
new file mode 100644
index 0000000..dc4c436
--- /dev/null
+++ b/opencv/code/a11.py
@@ -0,0 +1,29 @@
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+from skimage.util import random_noise
+
+image = cv2.imread("4igr.jpg", cv2.IMREAD_GRAYSCALE)
+
+# Add salt and pepper noise
+salt_pepper_noise = random_noise(image, mode="s&p", amount=0.02)
+
+# Add Gaussian noise
+gaussian_noise = random_noise(image, mode="gaussian", mean=0, var=0.01)
+
+# Create subplots
+plt.figure(figsize=(10, 5))
+
+# Salt and pepper noise
+plt.subplot(1, 2, 1)
+plt.imshow(salt_pepper_noise, cmap="gray")
+plt.title("Salt and Pepper Noise")
+plt.axis("off")
+
+# Gaussian noise
+plt.subplot(1, 2, 2)
+plt.imshow(gaussian_noise, cmap="gray")
+plt.title("Gaussian Noise")
+plt.axis("off")
+
+plt.savefig("11.svg")
diff --git a/opencv/code/a12.py b/opencv/code/a12.py
new file mode 100644
index 0000000..9cf8858
--- /dev/null
+++ b/opencv/code/a12.py
@@ -0,0 +1,26 @@
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+
+input_image = cv2.imread("4i.jpg", cv2.IMREAD_GRAYSCALE)
+
+kernel_size = 3
+
+filters = [
+    (input_image, "Original Image"),
+    (cv2.blur(input_image, (kernel_size, kernel_size)), "Mean Filtered"),
+    (cv2.GaussianBlur(input_image, (kernel_size, kernel_size), 0), "Weighted Average Filtered"),
+    (cv2.medianBlur(input_image, kernel_size), "Median Filtered"),
+    (cv2.dilate(input_image, np.ones((kernel_size, kernel_size), np.uint8)), "Max Filtered"),
Filtered"), + (cv2.erode(input_image, np.ones((kernel_size, kernel_size), np.uint8)), "Min Filtered"), +] + +plt.figure(figsize=(12, 10)) + +for i, (filtered_image, title) in enumerate(filters, start=1): + plt.subplot(3, 3, i) + plt.imshow(filtered_image, cmap="gray") + plt.title(title) + plt.axis("off") + +plt.savefig("12.svg") diff --git a/opencv/code/a13.py b/opencv/code/a13.py new file mode 100644 index 0000000..3afdf1a --- /dev/null +++ b/opencv/code/a13.py @@ -0,0 +1,9 @@ +import cv2 + +image = cv2.imread("1i.png", cv2.IMREAD_GRAYSCALE) + +threshold_values = [50, 100, 150] + +for i, threshold in enumerate(threshold_values, start=1): + _, segmented_image = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY) + cv2.imwrite(f"13.{i}.jpg", segmented_image) diff --git a/opencv/code/a14.py b/opencv/code/a14.py new file mode 100644 index 0000000..ec04f27 --- /dev/null +++ b/opencv/code/a14.py @@ -0,0 +1,53 @@ +from matplotlib import pyplot as plt +import cv2 +import numpy as np + +img = cv2.imread("1i.png", 0) +laplacian = cv2.Laplacian(img, -1, None, 3) + +robert_x_kernel = np.array([[-1, 0], [0, 1]]) +robert_y_kernel = np.array([[0, -1], [1, 0]]) +robert_x = cv2.filter2D(img, -1, robert_x_kernel) +robert_y = cv2.filter2D(img, -1, robert_y_kernel) +robert_combined = cv2.bitwise_or(robert_x, robert_y) + +prewitt_x_kernel = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]) +prewitt_y_kernel = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]) +prewitt_x = cv2.filter2D(img, -1, prewitt_x_kernel) +prewitt_y = cv2.filter2D(img, -1, prewitt_y_kernel) +prewitt_combined = cv2.bitwise_or(prewitt_x, prewitt_y) + +sx = cv2.Sobel(img, cv2.CV_64F, 1, 0) +sy = cv2.Sobel(img, cv2.CV_64F, 0, 1) +sobel_x = np.uint8(np.absolute(sx)) +sobel_y = np.uint8(np.absolute(sy)) +sobel_combined = cv2.bitwise_or(sobel_x, sobel_y) + +empty = np.zeros(img.shape, dtype=np.uint8) + +data = [ + (img, "Original Image"), + (laplacian, "Laplacian"), + (robert_x, "Robert in x direction"), + (robert_y, "Robert in y direction"), + (robert_combined, "Combined Robert"), + (prewitt_x, "Prewitt in x direction"), + (prewitt_y, "Prewitt in y direction"), + (prewitt_combined, "Combined Prewitt"), + (sobel_x, "Sobel in x direction"), + (sobel_y, "Sobel in y direction"), + (sobel_combined, "Combined Sobel"), + (empty, ""), +] + +fig, axs = plt.subplots(3, 4, figsize=(10, 7)) + +for ax, (image, title) in zip(axs.flat, data): + if image is not img: + image = 255 - image + ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + ax.set_title(title, fontsize=10) + ax.axis("off") + +plt.tight_layout() +plt.savefig("14.svg") diff --git a/opencv/code/a2.py b/opencv/code/a2.py new file mode 100644 index 0000000..f0bfc14 --- /dev/null +++ b/opencv/code/a2.py @@ -0,0 +1,26 @@ +import cv2 +import numpy as np + +image = cv2.imread("1i.png") + +angle = float(input("Enter angle in degrees: ")) + +height, width = image.shape[:2] + +rotation_matrix = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1) + +# Determine the new dimensions of the rotated image +cos_theta = np.abs(rotation_matrix[0, 0]) +sin_theta = np.abs(rotation_matrix[0, 1]) +new_width = int(height * sin_theta + width * cos_theta) +new_height = int(height * cos_theta + width * sin_theta) + +# Adjust the rotation matrix to take into account translation to keep the entire image within bounds +rotation_matrix[0, 2] += (new_width - width) / 2 +rotation_matrix[1, 2] += (new_height - height) / 2 + +rotated_image = cv2.warpAffine(image, rotation_matrix, (new_width, new_height)) + 
+cv2.imwrite(f"2.({angle}).png", rotated_image) +cv2.waitKey(0) +cv2.destroyAllWindows() diff --git a/opencv/code/a3.py b/opencv/code/a3.py new file mode 100644 index 0000000..5a8ffcd --- /dev/null +++ b/opencv/code/a3.py @@ -0,0 +1,14 @@ +import cv2 +import matplotlib.pyplot as plt + +image = cv2.imread("1i.png", cv2.IMREAD_GRAYSCALE) + +histogram = cv2.calcHist([image], [0], None, [256], [0, 256]) + +plt.figure(figsize=(8, 5)) +plt.plot(histogram, color="black") +plt.title("Grayscale Histogram") +plt.xlabel("Pixel Value") +plt.ylabel("Frequency") +plt.xlim([0, 256]) +plt.savefig("3.svg") diff --git a/opencv/code/a4.py b/opencv/code/a4.py new file mode 100644 index 0000000..dbd14e9 --- /dev/null +++ b/opencv/code/a4.py @@ -0,0 +1,17 @@ +import cv2 +import numpy as np + +image = cv2.imread("4i.jpg") + +blue_channel, green_channel, red_channel = cv2.split(image) + +zeros = np.zeros_like(blue_channel) + +# Merge each channel with zeroes for the other channels +blue_image = cv2.merge([blue_channel, zeros, zeros]) +green_image = cv2.merge([zeros, green_channel, zeros]) +red_image = cv2.merge([zeros, zeros, red_channel]) + +cv2.imwrite("4.b.jpg", blue_image) +cv2.imwrite("4.g.jpg", green_image) +cv2.imwrite("4.r.jpg", red_image) diff --git a/opencv/code/a5.py b/opencv/code/a5.py new file mode 100644 index 0000000..52e56c5 --- /dev/null +++ b/opencv/code/a5.py @@ -0,0 +1,42 @@ +import cv2 +import numpy as np + +image = cv2.imread("4i.jpg", cv2.IMREAD_GRAYSCALE) + +# Image Negative +negative_image = 255 - image +cv2.imwrite("5.negative.jpg", negative_image) + +# Log Transformation +c = 255 / np.log(1 + np.max(image)) +log_transformed = c * np.log1p(1.0 + image) +log_transformed = np.uint8(log_transformed) +cv2.imwrite("5.log.jpg", log_transformed) + +# Power Law Transform +gamma = 0.5 +power_law_transformed = np.power(image, gamma) +power_law_transformed = cv2.normalize( + power_law_transformed, None, 0, 255, cv2.NORM_MINMAX +) +power_law_transformed = np.uint8(power_law_transformed) +cv2.imwrite("5.power_law.jpg", power_law_transformed) + + +# Piecewise Linear Transform +def piecewise_linear(x): + return np.piecewise( + x, + [x < 50, (x >= 50) & (x < 100), (x >= 100) & (x < 150), x >= 150], + [ + lambda x: 0, + lambda x: 255 * ((x - 50) / (100 - 50)), + lambda x: 255, + lambda x: 255 * ((255 - x) / (255 - 150)), + ], + ) + + +piecewise_transformed = piecewise_linear(image) +piecewise_transformed = np.uint8(piecewise_transformed) +cv2.imwrite("5.piecewise.jpg", piecewise_transformed) diff --git a/opencv/code/a6.py b/opencv/code/a6.py new file mode 100644 index 0000000..a72d103 --- /dev/null +++ b/opencv/code/a6.py @@ -0,0 +1,43 @@ +import cv2 +import matplotlib.pyplot as plt + +image = cv2.imread("4i.jpg", cv2.IMREAD_GRAYSCALE) + +# Perform histogram equalization +equalized_image = cv2.equalizeHist(image) + +# Calculate histograms +hist_original = cv2.calcHist([image], [0], None, [256], [0, 256]) +hist_equalized = cv2.calcHist([equalized_image], [0], None, [256], [0, 256]) + +# Plot original and equalized images and their histograms +plt.figure(figsize=(10, 8)) + +# Original image and histogram +plt.subplot(2, 2, 1) +plt.imshow(image, cmap="gray") +plt.title("Original Image") +plt.xticks([]) +plt.yticks([]) + +plt.subplot(2, 2, 2) +plt.plot(hist_original, color="black") +plt.title("Histogram of Original Image") +plt.xlabel("Pixel Value") +plt.ylabel("Frequency") + +# Equalized image and histogram +plt.subplot(2, 2, 3) +plt.imshow(equalized_image, cmap="gray") +plt.title("Equalized Image") 
+plt.xticks([])
+plt.yticks([])
+
+plt.subplot(2, 2, 4)
+plt.plot(hist_equalized, color="black")
+plt.title("Histogram of Equalized Image")
+plt.xlabel("Pixel Value")
+plt.ylabel("Frequency")
+
+plt.tight_layout()
+plt.savefig("6.svg")
diff --git a/opencv/code/a7.py b/opencv/code/a7.py
new file mode 100644
index 0000000..d9fbb0c
--- /dev/null
+++ b/opencv/code/a7.py
@@ -0,0 +1,26 @@
+import cv2
+import numpy as np
+
+image = cv2.imread("4i.jpg", cv2.IMREAD_GRAYSCALE)
+
+# a) Brightness enhancement
+brightness_enhanced = cv2.add(image, 50)
+cv2.imwrite("7.a.jpg", brightness_enhanced)
+
+# b) Brightness suppression
+brightness_suppressed = cv2.subtract(image, 50)
+cv2.imwrite("7.b.jpg", brightness_suppressed)
+
+# c) Contrast manipulation
+alpha = 1.5
+contrast_adjusted = cv2.multiply(image, alpha)
+cv2.imwrite("7.c.jpg", contrast_adjusted)
+
+# d) Gray level slicing without background
+lower_threshold = 100
+upper_threshold = 200
+gray_level_sliced = np.copy(image)
+gray_level_sliced[
+    (gray_level_sliced >= lower_threshold) & (gray_level_sliced <= upper_threshold)
+] = 255
+cv2.imwrite("7.d.jpg", gray_level_sliced)
diff --git a/opencv/code/a8.py b/opencv/code/a8.py
new file mode 100644
index 0000000..d1cdfb5
--- /dev/null
+++ b/opencv/code/a8.py
@@ -0,0 +1,10 @@
+import cv2
+
+image1 = cv2.imread("9i1.jpg", cv2.IMREAD_GRAYSCALE)
+image2 = cv2.imread("9i2.jpg", cv2.IMREAD_GRAYSCALE)
+
+assert image1.shape == image2.shape, "Input images must have the same dimensions"
+
+averaged_image = cv2.addWeighted(image1, 0.5, image2, 0.5, 0)
+
+cv2.imwrite("8o.jpg", averaged_image)
diff --git a/opencv/code/a9.py b/opencv/code/a9.py
new file mode 100644
index 0000000..fab1a4e
--- /dev/null
+++ b/opencv/code/a9.py
@@ -0,0 +1,10 @@
+import cv2
+
+image1 = cv2.imread("9i1.jpg", cv2.IMREAD_GRAYSCALE)
+image2 = cv2.imread("9i2.jpg", cv2.IMREAD_GRAYSCALE)
+
+assert image1.shape == image2.shape, "Input images must have the same dimensions"
+
+result_image = cv2.subtract(image1, image2)
+
+cv2.imwrite("9o.jpg", 255 - result_image)
diff --git a/opencv/code/align.py b/opencv/code/align.py
new file mode 100644
index 0000000..6a340f6
--- /dev/null
+++ b/opencv/code/align.py
@@ -0,0 +1,43 @@
+import cv2
+import numpy as np
+
+# Load the two images
+image1 = cv2.imread("T1.jpg")
+image2 = cv2.imread("T2.jpg")
+
+# Convert images to grayscale
+gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
+gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
+
+# Initialize SIFT detector
+sift = cv2.SIFT_create()
+
+# Find keypoints and descriptors
+keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+# Initialize keypoint matcher
+matcher = cv2.BFMatcher()
+
+# Match descriptors
+matches = matcher.match(descriptors1, descriptors2)
+
+# Sort matches by distance
+matches = sorted(matches, key=lambda x: x.distance)
+
+# Extract matched keypoints
+points1 = np.float32([keypoints1[m.queryIdx].pt for m in matches])
+points2 = np.float32([keypoints2[m.trainIdx].pt for m in matches])
+
+# Estimate transformation matrix (homography) using RANSAC
+homography, _ = cv2.findHomography(points1, points2, cv2.RANSAC)
+
+# Warp image1 to align with image2
+aligned_image1 = cv2.warpPerspective(
+    image1, homography, (image2.shape[1], image2.shape[0])
+)
+
+# Display and save the aligned image
+cv2.imwrite("T1p.jpg", aligned_image1)
+cv2.waitKey(0)
+cv2.destroyAllWindows()
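Note: a few of the new scripts look like they may misbehave at runtime. cv2.imread returns BGR data, so the COLOR_RGB2* codes in a1.py apply the red/blue weights to swapped channels; np.log1p(x) already computes log(1 + x), so the log transform in a5.py adds 1 twice; and cv2.cvtColor(..., cv2.COLOR_BGR2RGB) in a14.py expects a 3- or 4-channel input, while the edge maps passed to it are single-channel. A minimal sketch of corrected calls, assuming the same file names used by the scripts above (not part of the committed code):

import cv2
import numpy as np

# a1.py: imread gives BGR, so use the BGR2* conversion codes
bgr = cv2.imread("1i.png")
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)

# a5.py: log1p(x) is already log(1 + x), so pass the pixel values directly
c = 255 / np.log(1 + np.max(gray))
log_transformed = np.uint8(c * np.log1p(gray.astype(np.float64)))

# a14.py: the edge maps are single-channel, so display them with a gray
# colormap instead of converting with COLOR_BGR2RGB, e.g.
#   ax.imshow(255 - edge_map, cmap="gray")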