# - Implementación de detector híbrido (Whisper + Chat + Audio + VLM)
# - Sistema de detección de gameplay real vs hablando
# - Scene detection con FFmpeg
# - Soporte para RTX 3050 y RX 6800 XT
# - Guía completa en 6800xt.md para próxima IA
# - Scripts de filtrado visual y análisis de contexto
# - Pipeline automatizado de generación de videos
#!/usr/bin/env python3
"""
PIPELINE COMPLETO:

1. Whisper completo (video original)
2. Minimax 1ª pasada: analiza TODO y elige mejores momentos
3. Extrae clips del video
4. Whisper a highlights: transcribe SOLO los clips
5. Minimax 2ª pasada: analiza CADA CLIP y los refina
"""
import json
import logging
import os
import re
import subprocess
from pathlib import Path
# Module-wide logging: INFO level, logger named after this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def paso_1_whisper_completo(video_path, output_transcripcion="transcripcion_completa.json"):
    """Step 1: Transcribe the full original video with Whisper (GPU).

    Delegates to the ``transcribe_with_whisper.py`` helper script.

    Args:
        video_path: Path to the original video file.
        output_transcripcion: Path where the transcription JSON is written.

    Returns:
        The path to the transcription JSON file.

    Raises:
        subprocess.CalledProcessError: If the helper script exits non-zero.
    """
    logger.info("=" * 70)
    logger.info("PASO 1: Whisper completo - Transcribiendo video original...")
    logger.info("=" * 70)

    cmd = [
        "python3", "transcribe_with_whisper.py",
        "--video", video_path,
        "--output", output_transcripcion,
        "--model", "base",
    ]

    # check=True aborts the pipeline on failure; the CompletedProcess result
    # was previously bound to an unused variable, so it is discarded here.
    subprocess.run(cmd, check=True)
    logger.info(f"✓ Transcripción guardada en {output_transcripcion}")

    return output_transcripcion
def paso_2_minimax_primera_pasada(transcripcion_json, output_intervals="intervals_v1.json"):
    """Step 2: First Minimax pass - analyze the WHOLE transcription and pick the best moments.

    Delegates to the death/failure detector script and returns the path of
    the resulting intervals JSON.
    """
    banner = "=" * 70
    logger.info(banner)
    logger.info("PASO 2: Minimax 1ª pasada - Analizando stream completo...")
    logger.info(banner)

    # Reuse the previously built death/failure detector.
    subprocess.run(
        [
            "python3", "detector_muertes.py",
            "--transcripcion", transcripcion_json,
            "--output", output_intervals,
            "--top", "50",
            "--min-duration", "10",
            "--max-duration", "25",
        ],
        check=True,
    )
    logger.info(f"✓ Intervalos guardados en {output_intervals}")

    return output_intervals
def paso_3_extraer_clips(video_path, intervals_json, output_video="highlights_v1.mp4"):
    """Step 3: Cut the selected clips out of the original video.

    Delegates to ``generate_video.py`` and returns the output video path.
    """
    banner = "=" * 70
    logger.info(banner)
    logger.info("PASO 3: Extrayendo clips del video original...")
    logger.info(banner)

    extract_cmd = [
        "python3", "generate_video.py",
        "--video", video_path,
        "--highlights", intervals_json,
        "--output", output_video,
    ]
    subprocess.run(extract_cmd, check=True)
    logger.info(f"✓ Video guardado en {output_video}")

    return output_video
def paso_4_whisper_a_clips(video_clips, output_transcripcion="transcripcion_clips.json"):
    """Step 4: Transcribe ONLY the extracted clips with Whisper.

    Same helper script as step 1, but run on the concatenated clips video.
    Returns the path of the clips transcription JSON.
    """
    banner = "=" * 70
    logger.info(banner)
    logger.info("PASO 4: Whisper a highlights - Transcribiendo SOLO los clips...")
    logger.info(banner)

    subprocess.run(
        [
            "python3", "transcribe_with_whisper.py",
            "--video", video_clips,
            "--output", output_transcripcion,
            "--model", "base",
        ],
        check=True,
    )
    logger.info(f"✓ Transcripción de clips guardada en {output_transcripcion}")

    return output_transcripcion
def _resumen_clip(indice, start, end, segments):
    """Build the one-line description of a clip: timestamp, duration, text preview."""
    duration = end - start
    # int() so the {:02d} format never fails if the JSON intervals are floats.
    mins, secs = divmod(int(start), 60)

    # Collect transcription segments fully contained in this clip's range.
    segments_text = [
        seg["text"].strip()
        for seg in segments
        if seg["start"] >= start and seg["end"] <= end
    ]
    text_preview = " ".join(segments_text)[:150]

    return f"Clip {indice}: [{mins:02d}:{secs:02d}] ({duration}s) - {text_preview}"


def _aplicar_decision(decision, original_start, original_end, indice):
    """Translate one model decision line into a refined interval.

    Returns ``[start, end]`` for KEEP/TRIM decisions, or ``None`` when the
    clip is dropped. Unrecognized decisions default to keeping the clip.
    """
    decision = decision.strip().upper()

    if "KEEP" in decision or "MANTENER" in decision:
        logger.info(f" Clip {indice}: KEEP")
        return [original_start, original_end]

    if "DROP" in decision or "EXCLUIR" in decision:
        logger.info(f" Clip {indice}: DROP")
        return None

    if "TRIM" in decision or "RECORTAR" in decision:
        # Extract the two offsets from the TRIM line.
        numbers = re.findall(r'\d+', decision)
        if len(numbers) >= 2:
            trim_start = int(numbers[0])
            trim_end = int(numbers[1])
            new_start = original_start + trim_start
            # Clamp so the trim never extends past the clip's own length.
            new_end = original_start + min(trim_end, original_end - original_start)
            if new_end - new_start >= 5:  # keep at least 5 seconds
                logger.info(f" Clip {indice}: TRIM {trim_start}-{trim_end}s")
                return [new_start, new_end]
            logger.info(f" Clip {indice}: TRIM too short, DROP")
            return None
        logger.info(f" Clip {indice}: TRIM format error, KEEP")
        return [original_start, original_end]

    # Unintelligible decision: keep the clip rather than lose content.
    logger.info(f" Clip {indice}: ? KEEP (default)")
    return [original_start, original_end]


def paso_5_minimax_segunda_pasada(intervals_v1, transcripcion_clips, intervals_json):
    """Step 5: Second Minimax pass - analyze EACH CLIP individually and refine it.

    For each clip:
    - Read that clip's transcription
    - Decide whether to keep it, drop it, or trim it

    Args:
        intervals_v1: Path to the first-pass intervals JSON.
        transcripcion_clips: Path to the clips transcription JSON.
        intervals_json: Output path for the refined intervals JSON.

    Returns:
        ``intervals_json`` (the path it wrote).
    """
    logger.info("=" * 70)
    logger.info("PASO 5: Minimax 2ª pasada - Refinando cada clip...")
    logger.info("=" * 70)

    with open(intervals_v1, 'r') as f:
        intervals = json.load(f)

    with open(transcripcion_clips, 'r') as f:
        trans_data = json.load(f)

    # Imported lazily so only this step requires the openai package.
    from openai import OpenAI

    client = OpenAI(
        base_url=os.environ.get("OPENAI_BASE_URL", "https://api.minimax.io/v1"),
        api_key=os.environ.get("OPENAI_API_KEY"),
    )

    refined_intervals = []
    segments = trans_data.get("segments", [])

    # Analyze clips in groups of 10 so we don't saturate the API.
    batch_size = 10
    for i in range(0, len(intervals), batch_size):
        batch = intervals[i:i + batch_size]

        clips_desc = [
            _resumen_clip(j + 1, start, end, segments)
            for j, (start, end) in enumerate(batch)
        ]
        batch_text = "\n".join(clips_desc)

        prompt = f"""Eres un editor final de highlights. TU MISIÓN: Decidir qué hacer con cada clip.

CLIPS A ANALIZAR:
{batch_text}

PARA CADA CLIP, responde con una de estas opciones:
- "KEEP: Clip con contenido bueno de muerte/fallo"
- "TRIM X-Y: Recortar desde X hasta Y segundos del clip (para quitar relleno)"
- "DROP: Clip sin contenido interesante"

FORMATO: Una línea por clip con tu decisión.

Ejemplo:
KEEP
TRIM 2-8
DROP
KEEP
TRIM 3-10

Tus decisiones para estos {len(batch)} clips:"""

        try:
            response = client.chat.completions.create(
                model="MiniMax-M2.5",
                messages=[
                    {"role": "system", "content": "Eres un editor experto que refina highlights."},
                    {"role": "user", "content": prompt},
                ],
                temperature=0.2,
                max_tokens=500,
            )

            content = response.choices[0].message.content.strip()

            # Drop blank lines so decision N stays aligned with clip N;
            # previously a blank line consumed a clip slot and shifted every
            # later decision onto the wrong clip.
            decisions = [line for line in content.split('\n') if line.strip()]

            for j, decision in enumerate(decisions):
                if j >= len(batch):
                    break
                original_start, original_end = batch[j]
                refined = _aplicar_decision(decision, original_start, original_end, j + 1)
                if refined is not None:
                    refined_intervals.append(refined)

        except Exception as e:
            logger.error(f"Error procesando batch: {e}")
            # On error, keep the whole batch unmodified.
            refined_intervals.extend(batch)

    with open(intervals_json, 'w') as f:
        json.dump(refined_intervals, f)

    logger.info(f"✓ Intervalos refinados guardados en {intervals_json}")
    logger.info(f" Originales: {len(intervals)} → Refinados: {len(refined_intervals)}")

    return intervals_json
def main():
    """Run the whole highlight pipeline on the video given on the command line."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--video", required=True, help="Video original")
    parser.add_argument("--output", default="HIGHLIGHTS_FINAL.mp4")
    args = parser.parse_args()

    video_path = args.video

    # Execute every pipeline stage in order, feeding each output forward.
    transcripcion_completa = paso_1_whisper_completo(video_path)
    intervals_v1 = paso_2_minimax_primera_pasada(transcripcion_completa)
    video_v1 = paso_3_extraer_clips(video_path, intervals_v1)
    transcripcion_clips = paso_4_whisper_a_clips(video_v1)
    intervals_v2 = paso_5_minimax_segunda_pasada(
        intervals_v1, transcripcion_clips, "intervals_v2.json"
    )

    banner = "=" * 70
    logger.info(banner)
    logger.info("GENERANDO VIDEO FINAL...")
    logger.info(banner)

    # Render the final video from the refined intervals.
    subprocess.run(
        [
            "python3", "generate_video.py",
            "--video", video_path,
            "--highlights", intervals_v2,
            "--output", args.output,
        ],
        check=True,
    )

    logger.info(banner)
    logger.info("¡PIPELINE COMPLETADO!")
    logger.info(f"Video final: {args.output}")
    logger.info(banner)


if __name__ == "__main__":
    main()