Files » calibration.py

FUJITA Ryusei, 01/08/2026 01:48 PM

 
import cv2
import numpy as np
from typing import Optional


class CalibrationManager:
    """!
    @brief Handles auto-calibration of projector overlap using a camera.

    This class provides methods to capture images from a camera, detect features,
    calculate the overlap between two projected images, and simulate the calibration process.
    """
    def __init__(self, camera_index: int = 0):
        """!
        @brief Initializes the CalibrationManager.

        Sets up the SIFT detector and FLANN matcher for feature matching.

        @param camera_index The index of the camera to use. Default is 0.
        """
        self.camera_index = camera_index
        self.sift = cv2.SIFT_create()
        # FLANN parameters: KD-tree index, suitable for SIFT's float descriptors
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)

    def capture_image(self) -> Optional[np.ndarray]:
        """!
        @brief Captures a single frame from the camera.

        Attempts to open the camera, warm it up, and capture a frame.

        @return The captured frame as a numpy array, or None if capture fails.
        """
        cap = cv2.VideoCapture(self.camera_index)
        if not cap.isOpened():
            print(f"Error: Could not open camera {self.camera_index}")
            return None

        # Warm up: read and discard a few frames so the camera can settle (e.g. auto-exposure)
        for _ in range(10):
            cap.read()

        ret, frame = cap.read()
        cap.release()

        if not ret:
            print("Error: Could not read frame")
            return None

        return frame

    def calculate_overlap(self, img_left: np.ndarray, img_right: np.ndarray) -> Optional[float]:
        """!
        @brief Calculates the horizontal overlap between two images using SIFT features.

        Detects SIFT features in both images, matches them using FLANN, filters matches
        using Lowe's ratio test, and calculates the median horizontal shift of the
        RANSAC inlier matches.

        @param img_left The captured image when ONLY the left projector is on.
        @param img_right The captured image when ONLY the right projector is on.
        @return The median horizontal shift (overlap) in pixels, or None if the calculation fails.
        """
        # Convert to grayscale
        gray_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2GRAY)
        gray_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2GRAY)

        # Detect keypoints and descriptors
        kp1, des1 = self.sift.detectAndCompute(gray_left, None)
        kp2, des2 = self.sift.detectAndCompute(gray_right, None)

        if des1 is None or des2 is None or len(kp1) < 2 or len(kp2) < 2:
            print("Not enough features detected.")
            return None

        # Match descriptors (k=2 so Lowe's ratio test can be applied)
        matches = self.flann.knnMatch(des1, des2, k=2)

        # Lowe's ratio test
        good_matches = []
        for pair in matches:
            if len(pair) < 2:
                continue
            m, n = pair
            if m.distance < 0.7 * n.distance:
                good_matches.append(m)

        if len(good_matches) < 4:
            print("Not enough good matches found.")
            return None

        # Extract locations of the good matches
        pts_left = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        pts_right = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        # Find a homography; the RANSAC mask identifies the inlier matches
        H, mask = cv2.findHomography(pts_left, pts_right, cv2.RANSAC, 5.0)

        if H is None:
            print("Could not find homography.")
            return None

        # Calculate the median horizontal shift of the inlier matches
        # (the median is robust to any outliers that survive RANSAC).
        matches_mask = mask.ravel().tolist()
        dx_list = []
        for i, match in enumerate(good_matches):
            if matches_mask[i]:
                # p1: feature position in the Left-Projector-Only capture
                # p2: feature position in the Right-Projector-Only capture
                p1 = kp1[match.queryIdx].pt
                p2 = kp2[match.trainIdx].pt
                # If the projectors were perfectly aligned, p1 and p2 would coincide.
                dx = p2[0] - p1[0]
                dx_list.append(dx)

        if not dx_list:
            return 0.0

        median_dx = float(np.median(dx_list))
        return median_dx

    def simulate_calibration(self, image_width: int, current_overlap: int) -> int:
        """!
        @brief Simulates the calibration process.

        Calculates a simulated adjustment to the overlap based on a target overlap
        and some random noise.

        @param image_width The width of the image (currently unused by the simulation).
        @param current_overlap The current overlap value.
        @return The suggested adjustment to the overlap (positive or negative integer).
        """
        # Simulation logic:
        # Assume the "perfect" overlap is 100 pixels.
        # If the current overlap is 75, we are off by 25, so return the error.
        target = 100
        error = target - current_overlap
        # Add small random noise in [-2, 2] to mimic measurement jitter
        noise = np.random.randint(-2, 3)
        return int(error + noise)
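

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): shows how the
# methods above could be wired together. The operator prompts, the choice of
# camera_index=0, and passing the measured shift back in as current_overlap
# are assumptions made for this example only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    manager = CalibrationManager(camera_index=0)

    input("Turn ON only the LEFT projector, then press Enter...")
    img_left = manager.capture_image()

    input("Turn ON only the RIGHT projector, then press Enter...")
    img_right = manager.capture_image()

    if img_left is not None and img_right is not None:
        overlap = manager.calculate_overlap(img_left, img_right)
        if overlap is not None:
            print(f"Measured horizontal shift: {overlap:.1f} px")
            adjustment = manager.simulate_calibration(
                image_width=img_left.shape[1], current_overlap=int(overlap)
            )
            print(f"Suggested overlap adjustment (simulated): {adjustment} px")
        else:
            print("Overlap calculation failed.")
    else:
        print("Image capture failed.")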