Codes » History » Version 6
Zhi Jie YEW, 11/06/2025 02:51 PM
| 1 | 2 | Anderson PHILLIP | [[Wiki|← Back to Start Page]] |
|---|---|---|---|
| 2 | |||
| 3 | 1 | Wing Sum TANG | h1. Codes |
| 4 | 3 | Zhi Jie YEW | |
| 5 | 6 | Zhi Jie YEW | h2. gui.py |
| 6 | 4 | Zhi Jie YEW | <pre><code class="python"> |
| 7 | 3 | Zhi Jie YEW | # gui.py |
| 8 | |||
| 9 | import tkinter as tk |
||
| 10 | from tkinter import ttk, filedialog, messagebox |
||
| 11 | import threading |
||
| 12 | import json |
||
| 13 | import os |
||
| 14 | |||
| 15 | # Import the logic classes |
||
| 16 | from main_alpha_blender import MainAlphaBlender |
||
| 17 | from video_processor import VideoProcessor |
||
| 18 | |||
class BlenderGUI:
    """A Tkinter GUI with tabs for image and video edge blending.

    Layout: a two-tab ``ttk.Notebook`` (Image Blender / Video Processor)
    plus a bottom bar with Load/Save configuration buttons.

    Video processing runs on a background thread.  Tkinter is not
    thread-safe, so every GUI access from that thread (status label,
    dialogs, button state) is marshalled onto the Tk main loop with
    ``master.after``, and Tk variables are only read on the main thread.
    """

    def __init__(self, master):
        """Build all widgets and load the default config if present.

        :param master: the root ``tk.Tk`` window.
        """
        self.master = master
        master.title("Image and Video Edge Blender")
        master.geometry("600x450")  # Increased height for config buttons

        # --- Create a Tabbed Interface ---
        self.notebook = ttk.Notebook(master)
        self.notebook.pack(pady=10, padx=10, fill="both", expand=True)

        self.image_tab = ttk.Frame(self.notebook, padding="10")
        self.video_tab = ttk.Frame(self.notebook, padding="10")

        self.notebook.add(self.image_tab, text="Image Blender")
        self.notebook.add(self.video_tab, text="Video Processor")

        # --- Populate each tab ---
        self.create_image_widgets()
        self.create_video_widgets()

        # --- Frame at the bottom for config management ---
        self.config_frame = ttk.Frame(master, padding=(10, 0, 10, 10))
        self.config_frame.pack(fill=tk.X, side=tk.BOTTOM)
        self.create_config_widgets()

        # Load default config on startup.  Silently falls back to the
        # hardcoded defaults when config.json doesn't exist.
        self.load_config(filepath="config.json", silent=True)

    def create_image_widgets(self):
        """Creates all widgets for the Image Blender tab."""
        self.image_blender = MainAlphaBlender()

        ttk.Label(self.image_tab, text="Input Image Directory:").grid(row=0, column=0, sticky=tk.W, pady=2)
        self.img_input_path_var = tk.StringVar(value=self.image_blender.image_path)
        ttk.Entry(self.image_tab, textvariable=self.img_input_path_var, width=50).grid(row=0, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.image_tab, text="Browse...", command=self.select_img_input_dir).grid(row=0, column=2)

        ttk.Label(self.image_tab, text="Output Directory:").grid(row=1, column=0, sticky=tk.W, pady=2)
        self.img_output_path_var = tk.StringVar(value=self.image_blender.output_dir)
        ttk.Entry(self.image_tab, textvariable=self.img_output_path_var, width=50).grid(row=1, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.image_tab, text="Browse...", command=self.select_img_output_dir).grid(row=1, column=2)

        ttk.Label(self.image_tab, text="Blend Width (pixels):").grid(row=2, column=0, sticky=tk.W, pady=5)
        self.img_blend_width_var = tk.IntVar(value=self.image_blender.blend_width)
        ttk.Entry(self.image_tab, textvariable=self.img_blend_width_var, width=10).grid(row=2, column=1, sticky=tk.W, padx=5)

        ttk.Label(self.image_tab, text="Gamma Value:").grid(row=3, column=0, sticky=tk.W, pady=2)
        self.img_gamma_var = tk.DoubleVar(value=self.image_blender.gamma_value)
        ttk.Entry(self.image_tab, textvariable=self.img_gamma_var, width=10).grid(row=3, column=1, sticky=tk.W, padx=5)

        ttk.Label(self.image_tab, text="Blend Method:").grid(row=4, column=0, sticky=tk.W, pady=2)
        self.img_method_var = tk.StringVar(value=self.image_blender.method)
        methods = ['linear', 'cosine', 'quadratic', 'sqrt', 'log', 'sigmoid']
        ttk.Combobox(self.image_tab, textvariable=self.img_method_var, values=methods, state="readonly").grid(row=4, column=1, sticky=tk.W, padx=5)

        self.img_preview_var = tk.BooleanVar(value=self.image_blender.preview)
        ttk.Checkbutton(self.image_tab, text="Show Preview After Processing", variable=self.img_preview_var).grid(row=5, column=1, sticky=tk.W, pady=10, padx=5)

        ttk.Button(self.image_tab, text="Run Blending Process", command=self.run_image_blending).grid(row=6, column=1, pady=20, sticky=tk.W)

        self.img_status_var = tk.StringVar(value="Ready.")
        ttk.Label(self.image_tab, textvariable=self.img_status_var, font=("Helvetica", 10, "italic")).grid(row=7, column=0, columnspan=3, sticky=tk.W, pady=5)

        # Let the entry column absorb extra horizontal space.
        self.image_tab.columnconfigure(1, weight=1)

    def create_video_widgets(self):
        """Creates all widgets for the Video Processor tab."""
        self.video_processor = VideoProcessor()

        ttk.Label(self.video_tab, text="Input Video File:").grid(row=0, column=0, sticky=tk.W, pady=2)
        self.vid_input_path_var = tk.StringVar()
        ttk.Entry(self.video_tab, textvariable=self.vid_input_path_var, width=50).grid(row=0, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.video_tab, text="Browse...", command=self.select_vid_input_file).grid(row=0, column=2)

        ttk.Label(self.video_tab, text="Output Directory:").grid(row=1, column=0, sticky=tk.W, pady=2)
        self.vid_output_path_var = tk.StringVar(value=self.video_processor.output_dir)
        ttk.Entry(self.video_tab, textvariable=self.vid_output_path_var, width=50).grid(row=1, column=1, sticky=tk.EW, padx=5)
        ttk.Button(self.video_tab, text="Browse...", command=self.select_vid_output_dir).grid(row=1, column=2)

        ttk.Label(self.video_tab, text="Blend Width (pixels):").grid(row=2, column=0, sticky=tk.W, pady=5)
        self.vid_blend_width_var = tk.IntVar(value=self.video_processor.blend_width)
        ttk.Entry(self.video_tab, textvariable=self.vid_blend_width_var, width=10).grid(row=2, column=1, sticky=tk.W, padx=5)

        ttk.Label(self.video_tab, text="Blend Method:").grid(row=3, column=0, sticky=tk.W, pady=2)
        self.vid_method_var = tk.StringVar(value=self.video_processor.blend_method)
        methods = ['linear', 'cosine']
        ttk.Combobox(self.video_tab, textvariable=self.vid_method_var, values=methods, state="readonly").grid(row=3, column=1, sticky=tk.W, padx=5)

        # Kept as an attribute so it can be disabled while a job is running.
        self.run_video_button = ttk.Button(self.video_tab, text="Process Video", command=self.run_video_processing_thread)
        self.run_video_button.grid(row=4, column=1, pady=20, sticky=tk.W)

        self.vid_status_var = tk.StringVar(value="Ready.")
        ttk.Label(self.video_tab, textvariable=self.vid_status_var).grid(row=5, column=0, columnspan=3, sticky=tk.W, pady=5)

        self.video_tab.columnconfigure(1, weight=1)

    def create_config_widgets(self):
        """Creates the Load and Save configuration buttons."""
        ttk.Button(self.config_frame, text="Load Config", command=self.load_config).pack(side=tk.LEFT, padx=5)
        ttk.Button(self.config_frame, text="Save Config", command=self.save_config).pack(side=tk.LEFT, padx=5)

    def load_config(self, filepath=None, silent=False):
        """Loads settings from a JSON file and updates the GUI.

        :param filepath: path to the JSON file; ``None`` opens a file dialog.
        :param silent: when True, suppress all dialogs (startup auto-load).
        """
        if filepath is None:
            filepath = filedialog.askopenfilename(
                title="Open Configuration File",
                filetypes=[("JSON files", "*.json"), ("All files", "*.*")]
            )

        if not filepath or not os.path.exists(filepath):
            if not silent:
                messagebox.showwarning("Load Config", "No configuration file selected or file not found.")
            return

        try:
            with open(filepath, 'r') as f:
                data = json.load(f)

            # Update Image Tab variables (defaults mirror the app fallbacks).
            self.img_input_path_var.set(data.get("image_path", "OriginalImages"))
            self.img_output_path_var.set(data.get("output_dir", "Results"))
            self.img_blend_width_var.set(data.get("blend_width", 200))
            self.img_gamma_var.set(data.get("gamma_value", 1.4))
            self.img_method_var.set(data.get("blend_method", "cosine"))
            self.img_preview_var.set(data.get("preview", True))

            # Update Video Tab variables
            self.vid_input_path_var.set(data.get("video_input_path", ""))
            self.vid_output_path_var.set(data.get("video_output_dir", "VideoResults"))
            self.vid_blend_width_var.set(data.get("video_blend_width", 100))
            self.vid_method_var.set(data.get("video_blend_method", "linear"))

            if not silent:
                messagebox.showinfo("Load Config", f"Configuration loaded successfully from {os.path.basename(filepath)}.")

        except Exception as e:
            # User-facing boundary: surface parse/IO problems as a dialog.
            if not silent:
                messagebox.showerror("Load Config Error", f"Failed to load or parse the configuration file.\n\nError: {e}")

    def save_config(self):
        """Saves the current GUI settings to a JSON file."""
        filepath = filedialog.asksaveasfilename(
            title="Save Configuration File",
            defaultextension=".json",
            initialfile="config.json",
            filetypes=[("JSON files", "*.json"), ("All files", "*.*")]
        )

        if not filepath:
            # User cancelled the dialog.
            return

        try:
            config_data = {
                # Image Tab settings
                "image_path": self.img_input_path_var.get(),
                "output_dir": self.img_output_path_var.get(),
                "blend_width": self.img_blend_width_var.get(),
                "gamma_value": self.img_gamma_var.get(),
                "blend_method": self.img_method_var.get(),
                "preview": self.img_preview_var.get(),

                # Video Tab settings
                "video_input_path": self.vid_input_path_var.get(),
                "video_output_dir": self.vid_output_path_var.get(),
                "video_blend_width": self.vid_blend_width_var.get(),
                "video_blend_method": self.vid_method_var.get()
            }

            with open(filepath, 'w') as f:
                json.dump(config_data, f, indent=4)

            messagebox.showinfo("Save Config", f"Configuration saved successfully to {os.path.basename(filepath)}.")

        except Exception as e:
            # User-facing boundary: surface IO problems as a dialog.
            messagebox.showerror("Save Config Error", f"Failed to save the configuration file.\n\nError: {e}")

    # --- Callbacks for Image Tab ---
    def select_img_input_dir(self):
        """Ask for the input image directory and store it if chosen."""
        path = filedialog.askdirectory(title="Select Input Image Directory")
        if path: self.img_input_path_var.set(path)

    def select_img_output_dir(self):
        """Ask for the image output directory and store it if chosen."""
        path = filedialog.askdirectory(title="Select Output Directory")
        if path: self.img_output_path_var.set(path)

    def run_image_blending(self):
        """Copy GUI settings into the blender and run it (on the main thread)."""
        self.image_blender.image_path = self.img_input_path_var.get()
        self.image_blender.output_dir = self.img_output_path_var.get()
        self.image_blender.blend_width = self.img_blend_width_var.get()
        self.image_blender.gamma_value = self.img_gamma_var.get()
        self.image_blender.method = self.img_method_var.get()
        self.image_blender.preview = self.img_preview_var.get()
        self.image_blender.update_paths()

        success, message = self.image_blender.run()
        if success:
            self.img_status_var.set(f"Success! {message}")
            messagebox.showinfo("Success", message)
        else:
            self.img_status_var.set(f"Error: {message}")
            messagebox.showerror("Error", message)

    # --- Callbacks for Video Tab ---
    def select_vid_input_file(self):
        """Ask for the input video file and store it if chosen."""
        path = filedialog.askopenfilename(title="Select Input Video File", filetypes=[("MP4 files", "*.mp4"), ("All files", "*.*")])
        if path: self.vid_input_path_var.set(path)

    def select_vid_output_dir(self):
        """Ask for the video output directory and store it if chosen."""
        path = filedialog.askdirectory(title="Select Output Directory")
        if path: self.vid_output_path_var.set(path)

    def update_video_status(self, message):
        """Thread-safe status update.

        Tkinter objects must only be touched from the main loop's thread,
        so the change is scheduled with ``master.after`` instead of calling
        ``StringVar.set`` directly from the worker thread.
        """
        self.master.after(0, self.vid_status_var.set, message)

    def run_video_processing_thread(self):
        """Starts the video processing in a new thread to avoid freezing the GUI.

        Tk variables are read here, on the main thread, BEFORE the worker
        starts — reading them from another thread is not safe.
        """
        self.video_processor.input_video_path = self.vid_input_path_var.get()
        self.video_processor.output_dir = self.vid_output_path_var.get()
        self.video_processor.blend_width = self.vid_blend_width_var.get()
        self.video_processor.blend_method = self.vid_method_var.get()

        self.run_video_button.config(state="disabled")
        thread = threading.Thread(target=self.run_video_processing)
        thread.daemon = True
        thread.start()

    def run_video_processing(self):
        """The actual processing logic, run in the background thread.

        All GUI effects (dialogs, re-enabling the Run button) are dispatched
        back to the main loop with ``master.after`` — calling Tkinter or
        messagebox APIs directly from a worker thread is unsafe.
        """
        try:
            success, message = self.video_processor.run(status_callback=self.update_video_status)

            if success:
                self.master.after(0, messagebox.showinfo, "Success", message)
            else:
                self.master.after(0, messagebox.showerror, "Error", message)

        except Exception as e:
            # Boundary: surface anything unexpected to the user.
            self.master.after(0, messagebox.showerror, "Critical Error",
                              f"An unexpected error occurred: {e}")
        finally:
            self.master.after(0, lambda: self.run_video_button.config(state="normal"))
||
| 262 | </code></pre> |
||
| 263 | 5 | Zhi Jie YEW | |
| 264 | 6 | Zhi Jie YEW | h2. main_alpha_blender.py |
| 265 | 5 | Zhi Jie YEW | <pre><code class="python"> |
| 266 | #!/usr/bin/env python |
||
| 267 | # -*- coding: utf-8 -*- |
||
| 268 | |||
| 269 | import cv2 |
||
| 270 | import numpy as np |
||
| 271 | import os |
||
| 272 | from config_reader import ConfigReader |
||
| 273 | |||
| 274 | |||
class MainAlphaBlender(object):
    """Blends the inner edges of a Left/Right image pair for projector overlap.

    Settings come from ``config.json`` via ``ConfigReader``; if the file is
    missing, hardcoded defaults are used instead.
    """

    def __init__(self, config_path="config.json"):
        """Load settings from *config_path*, falling back to defaults.

        :param config_path: path to the JSON configuration file.
        """
        try:
            self.__config_reader = ConfigReader(config_path)
            self.blend_width = self.__config_reader.get_blend_width()
            self.gamma_value = self.__config_reader.get_gamma_value()
            self.method = self.__config_reader.get_blend_method()
            self.output_dir = self.__config_reader.get_output_dir()
            self.preview = self.__config_reader.get_preview()
            self.image_path = self.__config_reader.get_image_path()
        except FileNotFoundError:
            # No config file: fall back to the application defaults.
            self.blend_width = 200
            self.gamma_value = 1.4
            self.method = "cosine"
            self.output_dir = "Results"
            self.preview = True
            self.image_path = "OriginalImages"
        self.update_paths()

    def update_paths(self):
        """Recompute the Left/Right input file paths from ``image_path``."""
        self.left_image_path = os.path.join(self.image_path, "Left.jpg")
        self.right_image_path = os.path.join(self.image_path, "Right.jpg")

    def create_alpha_gradient(self, blend_width, side, method="cosine"):
        """Return a 1-D alpha ramp of length *blend_width* with values in [0, 1].

        The ramp rises from 0 toward 1; for ``side == 'right'`` it is
        inverted so the image fades out toward its right edge.

        :param blend_width: number of samples in the ramp.
        :param side: 'left' or 'right' (controls inversion only).
        :param method: ramp shape, one of linear/cosine/quadratic/sqrt/log/sigmoid.
        :raises ValueError: for an unknown *method*.
        """
        if method == 'linear':
            alpha_gradient = np.linspace(0, 1, blend_width)
        elif method == 'cosine':
            # NOTE: the 0.85 exponent skews the cosine ramp; it also means
            # the ramp does not quite reach 1.0 at the end. Kept as-is —
            # presumably tuned for the projector overlap; confirm before changing.
            t = np.linspace(0, np.pi, blend_width)
            alpha_gradient = (1 - np.cos(t**0.85)) / 2
        elif method == 'quadratic':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = t**2
        elif method == 'sqrt':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = np.sqrt(t)
        elif method == 'log':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = np.log1p(9 * t) / np.log1p(9)
        elif method == 'sigmoid':
            t = np.linspace(0, 1, blend_width)
            alpha_gradient = 1 / (1 + np.exp(-12 * (t - 0.5)))
            # Guard the min-max rescale: with blend_width == 1 the span is
            # zero and the original code produced NaN from 0/0.
            span = alpha_gradient.max() - alpha_gradient.min()
            if span > 0:
                alpha_gradient = (alpha_gradient - alpha_gradient.min()) / span
            else:
                alpha_gradient = np.zeros_like(alpha_gradient)
        else:
            raise ValueError("Invalid method: choose from 'linear', 'cosine', 'quadratic', 'sqrt', 'log', or 'sigmoid'")
        if side == 'right':
            alpha_gradient = 1 - alpha_gradient
        return alpha_gradient

    def gamma_correction(self, image, gamma):
        """Apply adaptive gamma correction based on mean image intensity.

        The effective gamma is scaled by ``0.5 / mean`` and clamped to
        [0.8, 2.0], so darker images receive a stronger correction.

        :param image: uint8 image array.
        :param gamma: base gamma value before adaptation.
        :return: corrected uint8 image of the same shape.
        """
        img_float = image.astype(np.float32) / 255.0
        mean_intensity = np.mean(img_float)
        adaptive_gamma = gamma * (0.5 / (mean_intensity + 1e-5))  # epsilon avoids /0 on black input
        adaptive_gamma = np.clip(adaptive_gamma, 0.8, 2.0)
        corrected = np.power(img_float, 1.0 / adaptive_gamma)
        return np.uint8(np.clip(corrected * 255, 0, 255))

    def alpha_blend_edge(self, image, blend_width, side, method="cosine"):
        """Fade one vertical edge of *image* to black using an alpha ramp.

        :param image: H x W x 3 uint8 image.
        :param blend_width: width of the faded strip in pixels.
        :param side: which edge to fade, 'left' or 'right'.
        :param method: gradient shape, see :meth:`create_alpha_gradient`.
        :raises ValueError: if *side* is not 'left' or 'right'.
        :return: a new image with the edge blended.
        """
        height, width, _ = image.shape
        blended_image = image.copy()
        alpha_gradient = self.create_alpha_gradient(blend_width, side, method)
        if side == 'right':
            roi = blended_image[:, width - blend_width:]
        elif side == 'left':
            roi = blended_image[:, :blend_width]
        else:
            raise ValueError("Side must be 'left' or 'right'")
        # Broadcasting the (1, blend_width, 1) gradient against the ROI gives
        # the same result as the original full-size np.tile, without the copy.
        gradient_3d = alpha_gradient[np.newaxis, :, np.newaxis]
        if side == 'right':
            blended_image[:, width - blend_width:] = (roi * gradient_3d).astype(np.uint8)
        else:
            blended_image[:, :blend_width] = (roi * gradient_3d).astype(np.uint8)
        return blended_image

    def show_preview(self, left_image, right_image, scale=0.5):
        """Show both results side by side; blocks until a key is pressed.

        :param scale: display downscale factor for the preview window.
        """
        h = min(left_image.shape[0], right_image.shape[0])
        left_resized = cv2.resize(left_image, (int(left_image.shape[1]*scale), int(h*scale)))
        right_resized = cv2.resize(right_image, (int(right_image.shape[1]*scale), int(h*scale)))
        combined = np.hstack((left_resized, right_resized))
        cv2.imshow("Preview (Left + Right)", combined)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def run(self):
        """Execute the full pipeline: load, blend edges, gamma-correct, save.

        :return: ``(success, message)`` tuple; never raises to the caller.
        """
        try:
            os.makedirs(self.output_dir, exist_ok=True)
            left_img = cv2.imread(self.left_image_path, cv2.IMREAD_COLOR)
            right_img = cv2.imread(self.right_image_path, cv2.IMREAD_COLOR)
            # cv2.imread returns None (no exception) on a missing/bad file.
            if left_img is None or right_img is None:
                raise FileNotFoundError(f"Could not read images from '{self.image_path}'. Check path.")
            # The inner edges fade toward each other: left image fades on its
            # right edge, right image on its left edge.
            left_blended = self.alpha_blend_edge(left_img, self.blend_width, side='right', method=self.method)
            right_blended = self.alpha_blend_edge(right_img, self.blend_width, side='left', method=self.method)
            left_gamma = self.gamma_correction(left_blended, self.gamma_value)
            right_gamma = self.gamma_correction(right_blended, self.gamma_value)
            left_output_path = os.path.join(self.output_dir, f"{self.method}_left_gamma.jpg")
            right_output_path = os.path.join(self.output_dir, f"{self.method}_right_gamma.jpg")
            cv2.imwrite(left_output_path, left_gamma)
            cv2.imwrite(right_output_path, right_gamma)
            if self.preview:
                self.show_preview(left_gamma, right_gamma)
            return (True, f"Images saved successfully in '{self.output_dir}'.")
        except (FileNotFoundError, ValueError) as e:
            return (False, str(e))
        except Exception as e:
            return (False, f"An unexpected error occurred: {e}")
        finally:
            cv2.destroyAllWindows()
||
| 382 | </code></pre> |
||
| 383 | |||
| 384 | 6 | Zhi Jie YEW | h2. main.py |
| 385 | 5 | Zhi Jie YEW | <pre><code class="python"> |
| 386 | import tkinter as tk |
||
| 387 | from gui import BlenderGUI |
||
| 388 | |||
| 389 | if __name__ == "__main__": |
||
| 390 | """ |
||
| 391 | Main entry point for the application. |
||
| 392 | Initializes and runs the Tkinter GUI. |
||
| 393 | """ |
||
| 394 | root = tk.Tk() |
||
| 395 | app = BlenderGUI(master=root) |
||
| 396 | root.mainloop() |
||
| 397 | </code></pre> |
||
| 398 | |||
| 399 | 6 | Zhi Jie YEW | h2. video_processor.py |
| 400 | 5 | Zhi Jie YEW | <pre><code class="python"> |
| 401 | import cv2 |
||
| 402 | import numpy as np |
||
| 403 | import os |
||
| 404 | import time |
||
| 405 | |||
class VideoProcessor:
    """
    A class to handle dividing a video and applying alpha blending to the edges.
    Consolidates logic from divide_video.py, apply_alpha_blending_on_video.py, and Video_utility.py.
    """
    def __init__(self, config=None):
        """Initializes the processor with default or provided settings.

        :param config: optional dict overriding any of the default settings.
        """
        # Set default parameters
        self.input_video_path = ""
        self.output_dir = "VideoResults"
        self.blend_width = 100
        self.blend_method = "linear"
        self.divide_ratio = 2/3  # fraction of the width assigned to the left half

        # Overwrite defaults with a configuration dictionary if provided
        if config:
            self.input_video_path = config.get("input_video_path", self.input_video_path)
            self.output_dir = config.get("output_dir", self.output_dir)
            self.blend_width = config.get("blend_width", self.blend_width)
            self.blend_method = config.get("blend_method", self.blend_method)
            self.divide_ratio = config.get("divide_ratio", self.divide_ratio)

    @staticmethod
    def _remove_if_exists(path):
        """Best-effort deletion of a temporary file; ignores missing files."""
        try:
            os.remove(path)
        except OSError:
            pass

    def _create_alpha_gradient(self, blend_width, side, method):
        """Creates a 1D alpha gradient for blending.

        Rises 0 -> 1 for 'left'; inverted (fade-out) for 'right'.
        :raises ValueError: for an unknown *method*.
        """
        if method == 'linear':
            alpha_gradient = np.linspace(0, 1, blend_width)
        elif method == 'cosine':
            t = np.linspace(0, np.pi, blend_width)
            alpha_gradient = (1 - np.cos(t)) / 2
        else:
            raise ValueError(f"Invalid blend method: {method}")

        if side == 'right':
            alpha_gradient = 1 - alpha_gradient  # Create a fade-out gradient
        return alpha_gradient

    def _blend_image_edge(self, image, blend_width, side, method):
        """Applies the alpha gradient to one vertical edge of a single frame.

        :param image: H x W x 3 uint8 frame.
        :raises ValueError: if *side* is not 'left' or 'right'.
        :return: a new frame with the edge blended.
        """
        height, width, _ = image.shape
        blended_image = image.copy()
        alpha_gradient = self._create_alpha_gradient(blend_width, side, method)

        if side == 'right':
            roi = blended_image[:, width - blend_width:]
        elif side == 'left':
            roi = blended_image[:, :blend_width]
        else:
            raise ValueError("Side must be 'left' or 'right'")

        # Broadcast the (1, blend_width, 1) gradient over the ROI instead of
        # materialising a full-size np.tile copy for every frame.
        gradient_3d = alpha_gradient[np.newaxis, :, np.newaxis]

        if side == 'right':
            blended_image[:, width - blend_width:] = (roi * gradient_3d).astype(np.uint8)
        else:
            blended_image[:, :blend_width] = (roi * gradient_3d).astype(np.uint8)

        return blended_image

    def _divide_video(self, input_path, output_left_path, output_right_path, status_callback):
        """Splits a video into two halves based on the divide_ratio.

        :raises FileNotFoundError: if the input video cannot be opened.
        """
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            raise FileNotFoundError(f"Could not open video file: {input_path}")

        out_left = out_right = None
        try:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            midpoint = int(width * self.divide_ratio)

            out_left = cv2.VideoWriter(output_left_path, fourcc, fps, (midpoint, height))
            out_right = cv2.VideoWriter(output_right_path, fourcc, fps, (width - midpoint, height))

            # CAP_PROP_FRAME_COUNT can report 0 for some containers/streams,
            # so progress is only computed when it is positive (the original
            # divided by zero here).
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_count = 0

            while True:
                ret, frame = cap.read()
                if not ret: break

                frame_count += 1
                if status_callback and total_frames > 0 and frame_count % 30 == 0:  # Update status every 30 frames
                    progress = int((frame_count / total_frames) * 100)
                    status_callback(f"Dividing video... {progress}%")

                out_left.write(frame[:, :midpoint])
                out_right.write(frame[:, midpoint:])
        finally:
            # Always release capture and writers, even if a read/write failed
            # mid-loop (the original leaked them on exception).
            cap.release()
            if out_left is not None:
                out_left.release()
            if out_right is not None:
                out_right.release()

    def _apply_alpha_blending_to_video(self, input_path, output_path, side, status_callback):
        """Applies alpha blending to each frame of a video.

        :raises FileNotFoundError: if the input video cannot be opened.
        """
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened(): raise FileNotFoundError(f"Could not open video for blending: {input_path}")

        out = None
        try:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_count = 0

            while True:
                ret, frame = cap.read()
                if not ret: break

                frame_count += 1
                if status_callback and total_frames > 0 and frame_count % 30 == 0:
                    progress = int((frame_count / total_frames) * 100)
                    status_callback(f"Blending {side} video... {progress}%")

                blended_frame = self._blend_image_edge(frame, self.blend_width, side, self.blend_method)
                out.write(blended_frame)
        finally:
            # Release resources even on mid-loop failure.
            cap.release()
            if out is not None:
                out.release()

    def run(self, status_callback=None):
        """Executes the full video processing pipeline.

        Divide the input, blend the inner edge of each half, and clean up
        the intermediate files (on success AND on failure — the original
        leaked them when an error occurred mid-pipeline).

        :param status_callback: optional ``callable(str)`` for progress text.
        :return: ``(success, message)`` tuple; never raises to the caller.
        """
        # Intermediate paths are computed up-front so cleanup in `finally`
        # always knows what to remove.
        temp_left_path = os.path.join(self.output_dir, "temp_left.mp4")
        temp_right_path = os.path.join(self.output_dir, "temp_right.mp4")
        try:
            start_time = time.time()
            os.makedirs(self.output_dir, exist_ok=True)

            final_left_path = os.path.join(self.output_dir, "final_left.mp4")
            final_right_path = os.path.join(self.output_dir, "final_right.mp4")

            if status_callback: status_callback("Starting to divide video...")
            self._divide_video(self.input_video_path, temp_left_path, temp_right_path, status_callback)

            if status_callback: status_callback("Starting to blend left video...")
            self._apply_alpha_blending_to_video(temp_left_path, final_left_path, "right", status_callback)

            if status_callback: status_callback("Starting to blend right video...")
            self._apply_alpha_blending_to_video(temp_right_path, final_right_path, "left", status_callback)

            duration = time.time() - start_time
            message = f"Video processing complete in {duration:.2f}s. Files saved in '{self.output_dir}'."
            if status_callback: status_callback(message)
            return (True, message)

        except Exception as e:
            if status_callback: status_callback(f"Error: {e}")
            return (False, str(e))
        finally:
            self._remove_if_exists(temp_left_path)
            self._remove_if_exists(temp_right_path)
| 562 | </code></pre> |
||
| 563 | |||
| 564 | 6 | Zhi Jie YEW | h2. config_reader.py |
| 565 | 5 | Zhi Jie YEW | <pre><code class="python"> |
| 566 | #!/usr/bin/env python |
||
| 567 | # -*- coding: utf-8 -*- |
||
| 568 | |||
| 569 | import json |
||
| 570 | |||
class ConfigReader:
    """
    ConfigReader loads configuration settings from a JSON file.
    It uses a single base path for input images and exposes one typed
    getter per setting, each with a default that matches the fallbacks
    used elsewhere in the application.
    """

    def __init__(self, json_path: str):
        """
        Initialize the ConfigReader with the path to the configuration file.
        :param json_path: Path to the JSON configuration file
        :raises FileNotFoundError: if the file does not exist
        :raises ValueError: if the file contains invalid JSON
        """
        self.json_path = json_path
        self.config = None  # populated by load_config()
        self.load_config()

    def load_config(self):
        """Load configuration data from the JSON file."""
        try:
            with open(self.json_path, 'r', encoding='utf-8') as f:
                self.config = json.load(f)
        except FileNotFoundError as e:
            # Re-raise with a clearer message, preserving the original cause.
            raise FileNotFoundError(f"Configuration file not found: {self.json_path}") from e
        except json.JSONDecodeError as e:
            raise ValueError(f"Error decoding JSON: {e}") from e

    def get_blend_width(self) -> int:
        """Return the blending width in pixels."""
        return self.config.get("blend_width", 200)

    def get_gamma_value(self) -> float:
        """Return the gamma correction value.

        Default is 1.4, matching the hardcoded fallback the blender uses
        when no config file exists at all (the old default of 1.0 was
        inconsistent with the rest of the application).
        """
        return self.config.get("gamma_value", 1.4)

    def get_blend_method(self) -> str:
        """Return the blending method (e.g., 'cosine', 'linear').

        Default is 'cosine', matching the application-wide fallback.
        """
        return self.config.get("blend_method", "cosine")

    def get_image_path(self) -> str:
        """Return the base directory of the input images."""
        return self.config.get("image_path", "")

    def get_output_dir(self) -> str:
        """Return the directory for output results."""
        return self.config.get("output_dir", "Results")

    def get_preview(self) -> bool:
        """Return the preview flag."""
        return self.config.get("preview", False)
||
| 619 | </code></pre> |