import cv2
import numpy as np
import os
import time

class VideoProcessor:
    """
    Divides a video into two parts at a configurable ratio and fades the
    adjoining edges to black with an alpha gradient.

    Consolidates logic from divide_video.py, apply_alpha_blending_on_video.py,
    and Video_utility.py.
    """
    def __init__(self, config=None):
        """Initialize the processor with defaults, optionally overridden.

        Args:
            config: Optional dict; recognized keys are "input_video_path",
                "output_dir", "blend_width", "blend_method" ("linear" or
                "cosine"), and "divide_ratio". Missing keys keep defaults.
        """
        # Set default parameters
        self.input_video_path = ""
        self.output_dir = "VideoResults"
        self.blend_width = 100        # width of the faded edge, in pixels
        self.blend_method = "linear"  # gradient shape: "linear" or "cosine"
        self.divide_ratio = 2/3       # fraction of frame width given to the left part

        # Overwrite defaults with a configuration dictionary if provided
        if config:
            self.input_video_path = config.get("input_video_path", self.input_video_path)
            self.output_dir = config.get("output_dir", self.output_dir)
            self.blend_width = config.get("blend_width", self.blend_width)
            self.blend_method = config.get("blend_method", self.blend_method)
            self.divide_ratio = config.get("divide_ratio", self.divide_ratio)

    def _create_alpha_gradient(self, blend_width, side, method):
        """Create a 1D alpha ramp of length `blend_width`.

        Values rise 0 -> 1 for side='left' (fade in from the left edge) and
        fall 1 -> 0 for side='right' (fade out toward the right edge).

        Raises:
            ValueError: if `method` is neither 'linear' nor 'cosine'.
        """
        if method == 'linear':
            alpha_gradient = np.linspace(0, 1, blend_width)
        elif method == 'cosine':
            # Smooth S-curve: (1 - cos(t)) / 2 maps [0, pi] onto [0, 1]
            t = np.linspace(0, np.pi, blend_width)
            alpha_gradient = (1 - np.cos(t)) / 2
        else:
            raise ValueError(f"Invalid blend method: {method}")

        if side == 'right':
            alpha_gradient = 1 - alpha_gradient  # Create a fade-out gradient
        return alpha_gradient

    def _blend_image_edge(self, image, blend_width, side, method):
        """Return a copy of `image` with one vertical edge faded to black.

        Args:
            image: H x W x 3 uint8 frame.
            blend_width: width in pixels of the faded strip; clamped to the
                frame width so an oversized value cannot wrap the slice.
            side: 'left' or 'right' -- which edge to fade.
            method: gradient shape, passed to _create_alpha_gradient.

        Raises:
            ValueError: for an invalid `side` or `method`.
        """
        height, width, _ = image.shape
        # Clamp: if blend_width > width, `width - blend_width` would be
        # negative and the slice would silently select the wrong region.
        blend_width = min(blend_width, width)
        blended_image = image.copy()
        alpha_gradient = self._create_alpha_gradient(blend_width, side, method)

        if side == 'right':
            cols = slice(width - blend_width, width)
        elif side == 'left':
            cols = slice(0, blend_width)
        else:
            raise ValueError("Side must be 'left' or 'right'")

        # Broadcast the 1D gradient across rows and the 3 color channels;
        # identical result to tiling a full H x W x 3 array, no allocation.
        gradient_3d = alpha_gradient[np.newaxis, :, np.newaxis]
        blended_image[:, cols] = (blended_image[:, cols] * gradient_3d).astype(np.uint8)
        return blended_image

    def _divide_video(self, input_path, output_left_path, output_right_path, status_callback):
        """Split a video into two parts at column `width * divide_ratio`.

        Args:
            input_path: source video file.
            output_left_path: destination for columns [0, midpoint).
            output_right_path: destination for columns [midpoint, width).
            status_callback: optional callable(str) for progress text.

        Raises:
            FileNotFoundError: if the source video cannot be opened.
        """
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            raise FileNotFoundError(f"Could not open video file: {input_path}")

        out_left = out_right = None
        try:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            midpoint = int(width * self.divide_ratio)

            out_left = cv2.VideoWriter(output_left_path, fourcc, fps, (midpoint, height))
            out_right = cv2.VideoWriter(output_right_path, fourcc, fps, (width - midpoint, height))

            # CAP_PROP_FRAME_COUNT can report 0 for some containers, so the
            # progress math guards against dividing by zero.
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_count = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1
                if status_callback and total_frames > 0 and frame_count % 30 == 0:  # Update status every 30 frames
                    progress = int((frame_count / total_frames) * 100)
                    status_callback(f"Dividing video... {progress}%")

                out_left.write(frame[:, :midpoint])
                out_right.write(frame[:, midpoint:])
        finally:
            # Release handles even if a read/write raises mid-loop.
            cap.release()
            if out_left is not None:
                out_left.release()
            if out_right is not None:
                out_right.release()

    def _apply_alpha_blending_to_video(self, input_path, output_path, side, status_callback):
        """Apply the edge alpha fade to every frame of a video.

        Args:
            input_path: source video file.
            output_path: destination for the blended video.
            side: 'left' or 'right' -- which edge of each frame to fade.
            status_callback: optional callable(str) for progress text.

        Raises:
            FileNotFoundError: if the source video cannot be opened.
        """
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            raise FileNotFoundError(f"Could not open video for blending: {input_path}")

        out = None
        try:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            # Guard against containers whose frame count reads as 0.
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_count = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1
                if status_callback and total_frames > 0 and frame_count % 30 == 0:
                    progress = int((frame_count / total_frames) * 100)
                    status_callback(f"Blending {side} video... {progress}%")

                blended_frame = self._blend_image_edge(frame, self.blend_width, side, self.blend_method)
                out.write(blended_frame)
        finally:
            # Release handles even if a read/write raises mid-loop.
            cap.release()
            if out is not None:
                out.release()

    def run(self, status_callback=None):
        """Execute the full pipeline: divide, blend both parts, clean up.

        Args:
            status_callback: optional callable(str) invoked with progress
                and status messages throughout the run.

        Returns:
            (True, message) on success; (False, error_string) on any failure.
        """
        try:
            start_time = time.time()
            os.makedirs(self.output_dir, exist_ok=True)

            # Define intermediate and final file paths
            temp_left_path = os.path.join(self.output_dir, "temp_left.mp4")
            temp_right_path = os.path.join(self.output_dir, "temp_right.mp4")
            final_left_path = os.path.join(self.output_dir, "final_left.mp4")
            final_right_path = os.path.join(self.output_dir, "final_right.mp4")

            if status_callback: status_callback("Starting to divide video...")
            self._divide_video(self.input_video_path, temp_left_path, temp_right_path, status_callback)

            # The left part fades out on its right edge; the right part fades
            # in on its left edge -- the two fades meet at the split line.
            if status_callback: status_callback("Starting to blend left video...")
            self._apply_alpha_blending_to_video(temp_left_path, final_left_path, "right", status_callback)

            if status_callback: status_callback("Starting to blend right video...")
            self._apply_alpha_blending_to_video(temp_right_path, final_right_path, "left", status_callback)

            if status_callback: status_callback("Cleaning up temporary files...")
            os.remove(temp_left_path)
            os.remove(temp_right_path)

            duration = time.time() - start_time
            message = f"Video processing complete in {duration:.2f}s. Files saved in '{self.output_dir}'."
            if status_callback: status_callback(message)
            return (True, message)

        except Exception as e:
            # Report the failure through the callback and as a return value
            # rather than crashing a caller (e.g. a GUI thread).
            if status_callback: status_callback(f"Error: {e}")
            return (False, str(e))