Codes » History » Version 7
Zhi Jie YEW, 11/06/2025 04:16 PM
| 1 | 2 | Anderson PHILLIP | [[Wiki|← Back to Start Page]] |
|---|---|---|---|
| 2 | |||
| 3 | 1 | Wing Sum TANG | h1. Codes |
| 4 | 3 | Zhi Jie YEW | |
| 5 | 6 | Zhi Jie YEW | h2. gui.py |
| 6 | 4 | Zhi Jie YEW | <pre><code class="python"> |
| 7 | 3 | Zhi Jie YEW | # gui.py |
| 8 | |||
| 9 | import tkinter as tk |
||
| 10 | from tkinter import ttk, filedialog, messagebox |
||
| 11 | import threading |
||
| 12 | import json |
||
| 13 | import os |
||
| 14 | |||
| 15 | # Import the logic classes |
||
| 16 | from main_alpha_blender import MainAlphaBlender |
||
| 17 | from video_processor import VideoProcessor |
||
| 18 | |||
class BlenderGUI:
    """A Tkinter GUI with tabs for image and video edge blending.

    Video processing runs on a daemon worker thread so the window stays
    responsive.  Tkinter widgets and variables are not thread-safe, so:
    settings are snapshotted on the main thread before the worker starts,
    and every UI update coming from the worker is marshalled back onto
    the Tk main loop with ``master.after``.
    """

    def __init__(self, master):
        self.master = master
        master.title("Image and Video Edge Blender")
        master.geometry("600x450")  # Increased height for the config buttons

        # --- Create a tabbed interface ---
        self.notebook = ttk.Notebook(master)
        self.notebook.pack(pady=10, padx=10, fill="both", expand=True)

        self.image_tab = ttk.Frame(self.notebook, padding="10")
        self.video_tab = ttk.Frame(self.notebook, padding="10")

        self.notebook.add(self.image_tab, text="Image Blender")
        self.notebook.add(self.video_tab, text="Video Processor")

        # --- Populate each tab ---
        self.create_image_widgets()
        self.create_video_widgets()

        # --- Frame at the bottom for config management ---
        self.config_frame = ttk.Frame(master, padding=(10, 0, 10, 10))
        self.config_frame.pack(fill=tk.X, side=tk.BOTTOM)
        self.create_config_widgets()

        # Load default config on startup.  Silently falls back to the
        # hardcoded defaults when config.json does not exist.
        self.load_config(filepath="config.json", silent=True)

    def create_image_widgets(self):
        """Creates all widgets for the Image Blender tab."""
        self.image_blender = MainAlphaBlender()

        ttk.Label(self.image_tab, text="Input Image Directory:").grid(row=0, column=0, sticky=tk.W, pady=2)
        self.img_input_path_var = tk.StringVar(value=self.image_blender.image_path)
        ttk.Entry(self.image_tab, textvariable=self.img_input_path_var, width=50).grid(row=0, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.image_tab, text="Browse...", command=self.select_img_input_dir).grid(row=0, column=2)

        ttk.Label(self.image_tab, text="Output Directory:").grid(row=1, column=0, sticky=tk.W, pady=2)
        self.img_output_path_var = tk.StringVar(value=self.image_blender.output_dir)
        ttk.Entry(self.image_tab, textvariable=self.img_output_path_var, width=50).grid(row=1, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.image_tab, text="Browse...", command=self.select_img_output_dir).grid(row=1, column=2)

        ttk.Label(self.image_tab, text="Blend Width (pixels):").grid(row=2, column=0, sticky=tk.W, pady=5)
        self.img_blend_width_var = tk.IntVar(value=self.image_blender.blend_width)
        ttk.Entry(self.image_tab, textvariable=self.img_blend_width_var, width=10).grid(row=2, column=1, sticky=tk.W, padx=5)

        ttk.Label(self.image_tab, text="Gamma Value:").grid(row=3, column=0, sticky=tk.W, pady=2)
        self.img_gamma_var = tk.DoubleVar(value=self.image_blender.gamma_value)
        ttk.Entry(self.image_tab, textvariable=self.img_gamma_var, width=10).grid(row=3, column=1, sticky=tk.W, padx=5)

        ttk.Label(self.image_tab, text="Blend Method:").grid(row=4, column=0, sticky=tk.W, pady=2)
        self.img_method_var = tk.StringVar(value=self.image_blender.method)
        methods = ['linear', 'cosine', 'quadratic', 'sqrt', 'log', 'sigmoid']
        ttk.Combobox(self.image_tab, textvariable=self.img_method_var, values=methods, state="readonly").grid(row=4, column=1, sticky=tk.W, padx=5)

        self.img_preview_var = tk.BooleanVar(value=self.image_blender.preview)
        ttk.Checkbutton(self.image_tab, text="Show Preview After Processing", variable=self.img_preview_var).grid(row=5, column=1, sticky=tk.W, pady=10, padx=5)

        ttk.Button(self.image_tab, text="Run Blending Process", command=self.run_image_blending).grid(row=6, column=1, pady=20, sticky=tk.W)

        self.img_status_var = tk.StringVar(value="Ready.")
        ttk.Label(self.image_tab, textvariable=self.img_status_var, font=("Helvetica", 10, "italic")).grid(row=7, column=0, columnspan=3, sticky=tk.W, pady=5)

        # Let the entry column absorb horizontal resizing.
        self.image_tab.columnconfigure(1, weight=1)

    def create_video_widgets(self):
        """Creates all widgets for the Video Processor tab."""
        self.video_processor = VideoProcessor()

        ttk.Label(self.video_tab, text="Input Video File:").grid(row=0, column=0, sticky=tk.W, pady=2)
        self.vid_input_path_var = tk.StringVar()
        ttk.Entry(self.video_tab, textvariable=self.vid_input_path_var, width=50).grid(row=0, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.video_tab, text="Browse...", command=self.select_vid_input_file).grid(row=0, column=2)

        ttk.Label(self.video_tab, text="Output Directory:").grid(row=1, column=0, sticky=tk.W, pady=2)
        self.vid_output_path_var = tk.StringVar(value=self.video_processor.output_dir)
        ttk.Entry(self.video_tab, textvariable=self.vid_output_path_var, width=50).grid(row=1, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.video_tab, text="Browse...", command=self.select_vid_output_dir).grid(row=1, column=2)

        ttk.Label(self.video_tab, text="Blend Width (pixels):").grid(row=2, column=0, sticky=tk.W, pady=5)
        self.vid_blend_width_var = tk.IntVar(value=self.video_processor.blend_width)
        ttk.Entry(self.video_tab, textvariable=self.vid_blend_width_var, width=10).grid(row=2, column=1, sticky=tk.W, padx=5)

        ttk.Label(self.video_tab, text="Blend Method:").grid(row=3, column=0, sticky=tk.W, pady=2)
        self.vid_method_var = tk.StringVar(value=self.video_processor.blend_method)
        methods = ['linear', 'cosine']  # video pipeline supports a subset of methods
        ttk.Combobox(self.video_tab, textvariable=self.vid_method_var, values=methods, state="readonly").grid(row=3, column=1, sticky=tk.W, padx=5)

        self.run_video_button = ttk.Button(self.video_tab, text="Process Video", command=self.run_video_processing_thread)
        self.run_video_button.grid(row=4, column=1, pady=20, sticky=tk.W)

        self.vid_status_var = tk.StringVar(value="Ready.")
        ttk.Label(self.video_tab, textvariable=self.vid_status_var).grid(row=5, column=0, columnspan=3, sticky=tk.W, pady=5)

        self.video_tab.columnconfigure(1, weight=1)

    def create_config_widgets(self):
        """Creates the Load and Save configuration buttons."""
        ttk.Button(self.config_frame, text="Load Config", command=self.load_config).pack(side=tk.LEFT, padx=5)
        ttk.Button(self.config_frame, text="Save Config", command=self.save_config).pack(side=tk.LEFT, padx=5)

    def load_config(self, filepath=None, silent=False):
        """Loads settings from a JSON file and updates the GUI.

        :param filepath: path to the JSON config; a file dialog is shown when None
        :param silent: suppress all message boxes (used for the startup load)
        """
        if filepath is None:
            filepath = filedialog.askopenfilename(
                title="Open Configuration File",
                filetypes=[("JSON files", "*.json"), ("All files", "*.*")]
            )

        if not filepath or not os.path.exists(filepath):
            if not silent:
                messagebox.showwarning("Load Config", "No configuration file selected or file not found.")
            return

        try:
            with open(filepath, 'r') as f:
                data = json.load(f)

            # Update Image tab variables (missing keys fall back to defaults).
            self.img_input_path_var.set(data.get("image_path", "OriginalImages"))
            self.img_output_path_var.set(data.get("output_dir", "Results"))
            self.img_blend_width_var.set(data.get("blend_width", 200))
            self.img_gamma_var.set(data.get("gamma_value", 1.4))
            self.img_method_var.set(data.get("blend_method", "cosine"))
            self.img_preview_var.set(data.get("preview", True))

            # Update Video tab variables.
            self.vid_input_path_var.set(data.get("video_input_path", ""))
            self.vid_output_path_var.set(data.get("video_output_dir", "VideoResults"))
            self.vid_blend_width_var.set(data.get("video_blend_width", 100))
            self.vid_method_var.set(data.get("video_blend_method", "linear"))

            if not silent:
                messagebox.showinfo("Load Config", f"Configuration loaded successfully from {os.path.basename(filepath)}.")

        except Exception as e:
            # Also covers tk.TclError when a config value has the wrong type.
            if not silent:
                messagebox.showerror("Load Config Error", f"Failed to load or parse the configuration file.\n\nError: {e}")

    def save_config(self):
        """Saves the current GUI settings to a JSON file."""
        filepath = filedialog.asksaveasfilename(
            title="Save Configuration File",
            defaultextension=".json",
            initialfile="config.json",
            filetypes=[("JSON files", "*.json"), ("All files", "*.*")]
        )

        if not filepath:
            return  # user cancelled the dialog

        try:
            config_data = {
                # Image tab settings
                "image_path": self.img_input_path_var.get(),
                "output_dir": self.img_output_path_var.get(),
                "blend_width": self.img_blend_width_var.get(),
                "gamma_value": self.img_gamma_var.get(),
                "blend_method": self.img_method_var.get(),
                "preview": self.img_preview_var.get(),

                # Video tab settings
                "video_input_path": self.vid_input_path_var.get(),
                "video_output_dir": self.vid_output_path_var.get(),
                "video_blend_width": self.vid_blend_width_var.get(),
                "video_blend_method": self.vid_method_var.get()
            }

            with open(filepath, 'w') as f:
                json.dump(config_data, f, indent=4)

            messagebox.showinfo("Save Config", f"Configuration saved successfully to {os.path.basename(filepath)}.")

        except Exception as e:
            messagebox.showerror("Save Config Error", f"Failed to save the configuration file.\n\nError: {e}")

    # --- Callbacks for Image tab ---
    def select_img_input_dir(self):
        path = filedialog.askdirectory(title="Select Input Image Directory")
        if path:
            self.img_input_path_var.set(path)

    def select_img_output_dir(self):
        path = filedialog.askdirectory(title="Select Output Directory")
        if path:
            self.img_output_path_var.set(path)

    def run_image_blending(self):
        """Copies GUI values into the blender and runs it (on the main thread)."""
        self.image_blender.image_path = self.img_input_path_var.get()
        self.image_blender.output_dir = self.img_output_path_var.get()
        self.image_blender.blend_width = self.img_blend_width_var.get()
        self.image_blender.gamma_value = self.img_gamma_var.get()
        self.image_blender.method = self.img_method_var.get()
        self.image_blender.preview = self.img_preview_var.get()
        self.image_blender.update_paths()  # re-derive Left.jpg / Right.jpg paths

        success, message = self.image_blender.run()
        if success:
            self.img_status_var.set(f"Success! {message}")
            messagebox.showinfo("Success", message)
        else:
            self.img_status_var.set(f"Error: {message}")
            messagebox.showerror("Error", message)

    # --- Callbacks for Video tab ---
    def select_vid_input_file(self):
        path = filedialog.askopenfilename(title="Select Input Video File", filetypes=[("MP4 files", "*.mp4"), ("All files", "*.*")])
        if path:
            self.vid_input_path_var.set(path)

    def select_vid_output_dir(self):
        path = filedialog.askdirectory(title="Select Output Directory")
        if path:
            self.vid_output_path_var.set(path)

    def update_video_status(self, message):
        """Thread-safe status update: defers the variable write to the Tk main loop.

        BUGFIX: the previous version called ``vid_status_var.set`` directly
        from the worker thread, which is not safe in Tkinter.
        """
        self.master.after(0, self.vid_status_var.set, message)

    def run_video_processing_thread(self):
        """Starts the video processing in a new thread to avoid freezing the GUI.

        Tk variables are read here, while still on the main thread, so the
        worker never touches a widget or Tk variable directly.
        """
        self.run_video_button.config(state="disabled")

        # Snapshot the settings on the main thread (thread-safety fix).
        self.video_processor.input_video_path = self.vid_input_path_var.get()
        self.video_processor.output_dir = self.vid_output_path_var.get()
        self.video_processor.blend_width = self.vid_blend_width_var.get()
        self.video_processor.blend_method = self.vid_method_var.get()

        thread = threading.Thread(target=self.run_video_processing)
        thread.daemon = True  # don't keep the process alive after the window closes
        thread.start()

    def run_video_processing(self):
        """The actual processing logic, run in the background thread.

        All message boxes and the button re-enable are dispatched back to
        the main thread via ``master.after``.
        """
        try:
            success, message = self.video_processor.run(status_callback=self.update_video_status)

            if success:
                self.master.after(0, messagebox.showinfo, "Success", message)
            else:
                self.master.after(0, messagebox.showerror, "Error", message)

        except Exception as e:
            error_text = f"An unexpected error occurred: {e}"
            self.master.after(0, messagebox.showerror, "Critical Error", error_text)
        finally:
            # Re-enable the button from the main thread.
            self.master.after(0, lambda: self.run_video_button.config(state="normal"))
||
| 262 | </code></pre> |
||
| 263 | 5 | Zhi Jie YEW | |
| 264 | 6 | Zhi Jie YEW | h2. main_alpha_blender.py |
| 265 | 5 | Zhi Jie YEW | <pre><code class="python"> |
| 266 | #!/usr/bin/env python |
||
| 267 | # -*- coding: utf-8 -*- |
||
| 268 | |||
| 269 | import cv2 |
||
| 270 | import numpy as np |
||
| 271 | import os |
||
| 272 | from config_reader import ConfigReader |
||
| 273 | |||
| 274 | |||
class MainAlphaBlender(object):
    """Edge-blends a Left/Right image pair for a two-projector overlap.

    Configuration comes from ``config.json`` via ConfigReader when the file
    exists; otherwise hardcoded defaults are used.  ``run()`` returns a
    ``(success, message)`` tuple consumed by the GUI.
    """

    def __init__(self, config_path="config.json"):
        try:
            self.__config_reader = ConfigReader(config_path)
            self.blend_width = self.__config_reader.get_blend_width()
            self.gamma_value = self.__config_reader.get_gamma_value()
            self.method = self.__config_reader.get_blend_method()
            self.output_dir = self.__config_reader.get_output_dir()
            self.preview = self.__config_reader.get_preview()
            self.image_path = self.__config_reader.get_image_path()
        except FileNotFoundError:
            # No config file: fall back to built-in defaults.
            self.blend_width = 200
            self.gamma_value = 1.4
            self.method = "cosine"
            self.output_dir = "Results"
            self.preview = True
            self.image_path = "OriginalImages"
        self.update_paths()

    def update_paths(self):
        """Re-derive the Left.jpg / Right.jpg paths from ``image_path``."""
        self.left_image_path = os.path.join(self.image_path, "Left.jpg")
        self.right_image_path = os.path.join(self.image_path, "Right.jpg")

    def create_alpha_gradient(self, blend_width, side, method="cosine"):
        """Build a 1-D alpha ramp of length ``blend_width``.

        For ``side='left'`` the ramp rises 0 -> 1 (fade in from the left
        edge); for ``side='right'`` it is mirrored to fall 1 -> 0.

        :raises ValueError: for an unknown method name
        """
        if method == 'linear':
            alpha_gradient = np.linspace(0, 1, blend_width)
        elif method == 'cosine':
            t = np.linspace(0, np.pi, blend_width)
            # NOTE(review): the 0.85 exponent skews the cosine ramp; it also
            # means the ramp tops out slightly below 1.0 -- presumably a
            # deliberate projector tuning, confirm before changing.
            alpha_gradient = (1 - np.cos(t**0.85)) / 2
        elif method == 'quadratic':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = t**2
        elif method == 'sqrt':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = np.sqrt(t)
        elif method == 'log':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = np.log1p(9 * t) / np.log1p(9)
        elif method == 'sigmoid':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = 1 / (1 + np.exp(-12 * (t - 0.5)))
            # Rescale so the sigmoid spans exactly [0, 1].
            alpha_gradient = (alpha_gradient - alpha_gradient.min()) / (alpha_gradient.max() - alpha_gradient.min())
        else:
            raise ValueError("Invalid method: choose from 'linear', 'cosine', 'quadratic', 'sqrt', 'log', or 'sigmoid'")
        if side == 'right':
            alpha_gradient = 1 - alpha_gradient
        return alpha_gradient

    def gamma_correction(self, image, gamma):
        """Apply adaptive gamma correction to a uint8 BGR image.

        The effective gamma is scaled by the image's mean intensity and
        clamped to [0.8, 2.0] so very dark/bright images are not over-corrected.
        """
        img_float = image.astype(np.float32) / 255.0
        mean_intensity = np.mean(img_float)
        adaptive_gamma = gamma * (0.5 / (mean_intensity + 1e-5))  # 1e-5 avoids div-by-zero on black frames
        adaptive_gamma = np.clip(adaptive_gamma, 0.8, 2.0)
        corrected = np.power(img_float, 1.0 / adaptive_gamma)
        return np.uint8(np.clip(corrected * 255, 0, 255))

    def alpha_blend_edge(self, image, blend_width, side, method="cosine"):
        """Fade one vertical edge of ``image`` to black over ``blend_width`` px.

        :param side: 'left' fades the left edge in, 'right' fades the right edge out
        :raises ValueError: if ``side`` is neither 'left' nor 'right'
        """
        height, width, _ = image.shape
        blended_image = image.copy()
        alpha_gradient = self.create_alpha_gradient(blend_width, side, method)
        if side == 'right':
            roi = blended_image[:, width - blend_width:]
        elif side == 'left':
            roi = blended_image[:, :blend_width]
        else:
            raise ValueError("Side must be 'left' or 'right'")
        # Broadcast the 1-D ramp across rows and the 3 colour channels.
        gradient_3d = alpha_gradient[np.newaxis, :, np.newaxis]
        gradient_3d = np.tile(gradient_3d, (height, 1, 3))
        if side == 'right':
            blended_image[:, width - blend_width:] = (roi * gradient_3d).astype(np.uint8)
        else:
            blended_image[:, :blend_width] = (roi * gradient_3d).astype(np.uint8)
        return blended_image

    def show_preview(self, left_image, right_image, scale=0.5):
        """Show both results side by side in one scaled window (blocking)."""
        h = min(left_image.shape[0], right_image.shape[0])
        left_resized = cv2.resize(left_image, (int(left_image.shape[1]*scale), int(h*scale)))
        right_resized = cv2.resize(right_image, (int(right_image.shape[1]*scale), int(h*scale)))
        combined = np.hstack((left_resized, right_resized))
        cv2.imshow("Preview (Left + Right)", combined)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def _display_fullscreen(self, left_image, right_image):
        """Fullscreen display loop: '0' switches left -> right, second '0' or ESC exits."""
        # Query the screen size through a throwaway Tk root.
        import tkinter as tk
        root = tk.Tk()
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        root.destroy()

        window_name = "Display"
        cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

        left_full = cv2.resize(left_image, (screen_width, screen_height))
        right_full = cv2.resize(right_image, (screen_width, screen_height))

        showing_left = True
        cv2.imshow(window_name, left_full)

        while True:
            key = cv2.waitKey(1) & 0xFF
            if key == ord('0'):
                if showing_left:
                    cv2.imshow(window_name, right_full)
                    showing_left = False
                else:
                    break  # exit on second '0'
            elif key == 27:  # ESC to exit
                break

        cv2.destroyAllWindows()

    def run(self):
        """Blend, gamma-correct, save, and (optionally) display the image pair.

        :returns: ``(True, message)`` on success, ``(False, message)`` on failure
        """
        try:
            os.makedirs(self.output_dir, exist_ok=True)

            left_img = cv2.imread(self.left_image_path, cv2.IMREAD_COLOR)
            right_img = cv2.imread(self.right_image_path, cv2.IMREAD_COLOR)
            if left_img is None or right_img is None:
                raise FileNotFoundError(f"Could not read images from '{self.image_path}'. Check path.")

            # Fade the inner edge of each image (right edge of the left image,
            # left edge of the right image) so the projections overlap smoothly.
            left_blended = self.alpha_blend_edge(left_img, self.blend_width, side='right', method=self.method)
            right_blended = self.alpha_blend_edge(right_img, self.blend_width, side='left', method=self.method)

            left_gamma = self.gamma_correction(left_blended, self.gamma_value)
            right_gamma = self.gamma_correction(right_blended, self.gamma_value)

            left_output_path = os.path.join(self.output_dir, f"{self.method}_left_gamma.jpg")
            right_output_path = os.path.join(self.output_dir, f"{self.method}_right_gamma.jpg")

            # BUGFIX: save the gamma-corrected images -- previously the
            # pre-gamma blends were written to the "*_gamma.jpg" files while
            # the display showed the corrected ones.
            cv2.imwrite(left_output_path, left_gamma)
            cv2.imwrite(right_output_path, right_gamma)

            # BUGFIX: honour the preview flag -- the fullscreen display loop
            # previously ran unconditionally, ignoring the GUI checkbox.
            if self.preview:
                self._display_fullscreen(left_gamma, right_gamma)

            return (True, f"Images processed and displayed successfully. Files saved in '{self.output_dir}'.")

        except Exception as e:
            cv2.destroyAllWindows()  # make sure no fullscreen window is left behind
            return (False, f"An unexpected error occurred: {e}")
| 411 | |||
| 412 | 5 | Zhi Jie YEW | </code></pre> |
| 413 | 6 | Zhi Jie YEW | |
| 414 | 5 | Zhi Jie YEW | h2. main.py |
| 415 | <pre><code class="python"> |
||
import tkinter as tk

from gui import BlenderGUI

if __name__ == "__main__":
    # Main entry point for the application: build the Tk root window,
    # attach the blender GUI, then hand control to the Tkinter event loop.
    root = tk.Tk()
    app = BlenderGUI(master=root)
    root.mainloop()
||
| 427 | </code></pre> |
||
| 428 | 6 | Zhi Jie YEW | |
| 429 | 5 | Zhi Jie YEW | h2. video_processor.py |
| 430 | <pre><code class="python"> |
||
| 431 | import cv2 |
||
| 432 | import numpy as np |
||
| 433 | import os |
||
| 434 | import time |
||
| 435 | |||
class VideoProcessor:
    """
    A class to handle dividing a video and applying alpha blending to the edges.
    Consolidates logic from divide_video.py, apply_alpha_blending_on_video.py, and Video_utility.py.
    """

    def __init__(self, config=None):
        """Initializes the processor with default or provided settings.

        :param config: optional dict overriding any of the default parameters
        """
        # Default parameters
        self.input_video_path = ""
        self.output_dir = "VideoResults"
        self.blend_width = 100
        self.blend_method = "linear"
        self.divide_ratio = 2/3  # left part gets 2/3 of the frame width

        # Overwrite defaults with a configuration dictionary if provided
        if config:
            self.input_video_path = config.get("input_video_path", self.input_video_path)
            self.output_dir = config.get("output_dir", self.output_dir)
            self.blend_width = config.get("blend_width", self.blend_width)
            self.blend_method = config.get("blend_method", self.blend_method)
            self.divide_ratio = config.get("divide_ratio", self.divide_ratio)

    def _create_alpha_gradient(self, blend_width, side, method):
        """Creates a 1D alpha gradient for blending.

        'left' rises 0 -> 1; 'right' is mirrored to fall 1 -> 0.
        :raises ValueError: for an unknown method name
        """
        if method == 'linear':
            alpha_gradient = np.linspace(0, 1, blend_width)
        elif method == 'cosine':
            t = np.linspace(0, np.pi, blend_width)
            alpha_gradient = (1 - np.cos(t)) / 2
        else:
            raise ValueError(f"Invalid blend method: {method}")

        if side == 'right':
            alpha_gradient = 1 - alpha_gradient  # Create a fade-out gradient
        return alpha_gradient

    def _blend_image_edge(self, image, blend_width, side, method):
        """Applies the alpha gradient to one vertical edge of a single frame.

        :raises ValueError: if ``side`` is neither 'left' nor 'right'
        """
        height, width, _ = image.shape
        blended_image = image.copy()
        alpha_gradient = self._create_alpha_gradient(blend_width, side, method)

        if side == 'right':
            roi = blended_image[:, width - blend_width:]
        elif side == 'left':
            roi = blended_image[:, :blend_width]
        else:
            raise ValueError("Side must be 'left' or 'right'")

        # Tile the 1D gradient to match the 3 color channels of the ROI
        gradient_3d = alpha_gradient[np.newaxis, :, np.newaxis]
        gradient_3d = np.tile(gradient_3d, (height, 1, 3))

        if side == 'right':
            blended_image[:, width - blend_width:] = (roi * gradient_3d).astype(np.uint8)
        else:
            blended_image[:, :blend_width] = (roi * gradient_3d).astype(np.uint8)

        return blended_image

    def _divide_video(self, input_path, output_left_path, output_right_path, status_callback):
        """Splits a video into two halves based on the divide_ratio.

        :raises FileNotFoundError: if the input video cannot be opened
        """
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            raise FileNotFoundError(f"Could not open video file: {input_path}")

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        midpoint = int(width * self.divide_ratio)

        out_left = cv2.VideoWriter(output_left_path, fourcc, fps, (midpoint, height))
        out_right = cv2.VideoWriter(output_right_path, fourcc, fps, (width - midpoint, height))

        # BUGFIX: some containers report a 0/unknown frame count; clamp to 1
        # so the progress computation cannot divide by zero.
        total_frames = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 1)
        frame_count = 0

        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1
                if status_callback and frame_count % 30 == 0:  # Update status every 30 frames
                    progress = int((frame_count / total_frames) * 100)
                    status_callback(f"Dividing video... {progress}%")

                out_left.write(frame[:, :midpoint])
                out_right.write(frame[:, midpoint:])
        finally:
            # BUGFIX: always release the capture and writers, even when a
            # frame raises -- otherwise the output files are left unfinalized.
            cap.release()
            out_left.release()
            out_right.release()

    def _apply_alpha_blending_to_video(self, input_path, output_path, side, status_callback):
        """Applies alpha blending to each frame of a video.

        :raises FileNotFoundError: if the input video cannot be opened
        """
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            raise FileNotFoundError(f"Could not open video for blending: {input_path}")

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        # Clamp to 1 to protect the progress division (see _divide_video).
        total_frames = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 1)
        frame_count = 0

        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1
                if status_callback and frame_count % 30 == 0:
                    progress = int((frame_count / total_frames) * 100)
                    status_callback(f"Blending {side} video... {progress}%")

                blended_frame = self._blend_image_edge(frame, self.blend_width, side, self.blend_method)
                out.write(blended_frame)
        finally:
            # BUGFIX: release resources on every exit path.
            cap.release()
            out.release()

    def run(self, status_callback=None):
        """Executes the full video processing pipeline.

        Divide -> blend left half's right edge -> blend right half's left
        edge -> delete intermediates.

        :param status_callback: optional callable taking a progress string
        :returns: ``(True, message)`` on success, ``(False, message)`` on failure
        """
        try:
            start_time = time.time()
            os.makedirs(self.output_dir, exist_ok=True)

            # Define intermediate and final file paths
            temp_left_path = os.path.join(self.output_dir, "temp_left.mp4")
            temp_right_path = os.path.join(self.output_dir, "temp_right.mp4")
            final_left_path = os.path.join(self.output_dir, "final_left.mp4")
            final_right_path = os.path.join(self.output_dir, "final_right.mp4")

            if status_callback: status_callback("Starting to divide video...")
            self._divide_video(self.input_video_path, temp_left_path, temp_right_path, status_callback)

            if status_callback: status_callback("Starting to blend left video...")
            self._apply_alpha_blending_to_video(temp_left_path, final_left_path, "right", status_callback)

            if status_callback: status_callback("Starting to blend right video...")
            self._apply_alpha_blending_to_video(temp_right_path, final_right_path, "left", status_callback)

            if status_callback: status_callback("Cleaning up temporary files...")
            os.remove(temp_left_path)
            os.remove(temp_right_path)

            duration = time.time() - start_time
            message = f"Video processing complete in {duration:.2f}s. Files saved in '{self.output_dir}'."
            if status_callback: status_callback(message)
            return (True, message)

        except Exception as e:
            if status_callback: status_callback(f"Error: {e}")
            return (False, str(e))
||
| 592 | </code></pre> |
||
| 593 | 6 | Zhi Jie YEW | |
| 594 | 5 | Zhi Jie YEW | h2. config_reader.py |
| 595 | <pre><code class="python"> |
||
| 596 | #!/usr/bin/env python |
||
| 597 | # -*- coding: utf-8 -*- |
||
| 598 | |||
| 599 | import json |
||
| 600 | |||
class ConfigReader:
    """
    ConfigReader loads configuration settings from a JSON file.
    It now uses a single base path for input images.
    """

    def __init__(self, json_path: str):
        """
        Initialize the ConfigReader with the path to the configuration file.
        :param json_path: Path to the JSON configuration file
        """
        self.json_path = json_path
        self.config = None
        self.load_config()

    def load_config(self):
        """Load configuration data from the JSON file."""
        try:
            with open(self.json_path, 'r', encoding='utf-8') as handle:
                self.config = json.load(handle)
        except FileNotFoundError:
            raise FileNotFoundError(f"Configuration file not found: {self.json_path}")
        except json.JSONDecodeError as e:
            raise ValueError(f"Error decoding JSON: {e}")

    def _lookup(self, key, fallback):
        """Shared accessor: every getter is a keyed lookup with a fallback."""
        return self.config.get(key, fallback)

    def get_blend_width(self) -> int:
        """Return the blending width."""
        return self._lookup("blend_width", 200)

    def get_gamma_value(self) -> float:
        """Return the gamma correction value."""
        return self._lookup("gamma_value", 1.0)

    def get_blend_method(self) -> str:
        """Return the blending method (e.g., 'cosine', 'linear')."""
        return self._lookup("blend_method", "linear")

    def get_image_path(self) -> str:
        """Return the base directory of the input images."""
        return self._lookup("image_path", "")

    def get_output_dir(self) -> str:
        """Return the directory for output results."""
        return self._lookup("output_dir", "Results")

    def get_preview(self) -> bool:
        """Return the preview flag."""
        return self._lookup("preview", False)
||
| 649 | 7 | Zhi Jie YEW | </code></pre> |
| 650 | |||
| 651 | h2. videosynch.py |
||
| 652 | <pre><code class="python"> |
||
import cv2
import os
import time
from datetime import datetime

# Video to play, resolved relative to the current working directory.
video_name = "BadApple.mp4"
video_path = os.path.join(os.getcwd(), video_name)

# Wall-clock start time (today's date).  NOTE(review): if this time has
# already passed when the script launches, playback starts immediately.
target_time_str = "15:46:30"
target_time = datetime.strptime(target_time_str, "%H:%M:%S").time()

print(f"Waiting until {target_time_str} to start video...")

# Poll twice a second until the target wall-clock time is reached.
while True:
    now = datetime.now().time()
    if now >= target_time:
        break
    time.sleep(0.5)

print("Starting video fullscreen!")

cap = cv2.VideoCapture(video_path)

if not cap.isOpened():
    print(f"Error: Cannot open video file at {video_path}")
    exit()

fps = cap.get(cv2.CAP_PROP_FPS)
# BUGFIX: some containers report 0 FPS, which previously crashed the
# int(1000 / fps) frame-delay computation -- fall back to 30 FPS.
if fps <= 0:
    fps = 30.0
frame_delay_ms = int(1000 / fps)  # hoisted out of the playback loop

# Create fullscreen window
cv2.namedWindow("Video Playback", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Video Playback", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

# Play frame-by-frame; 'q' quits early, end of stream stops the loop.
while True:
    ret, frame = cap.read()
    if not ret:
        break

    cv2.imshow("Video Playback", frame)
    if cv2.waitKey(frame_delay_ms) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
||
| 697 | 1 | Wing Sum TANG | </code></pre> |