- Implementación de detector híbrido (Whisper + Chat + Audio + VLM) - Sistema de detección de gameplay real vs hablando - Scene detection con FFmpeg - Soporte para RTX 3050 y RX 6800 XT - Guía completa en 6800xt.md para próxima IA - Scripts de filtrado visual y análisis de contexto - Pipeline automatizado de generación de videos
233 lines
6.8 KiB
Python
233 lines
6.8 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
GAMEPLAY ACTIVE DETECTOR
|
|
Detecta solo momentos donde realmente está jugando (no intro, no selección, no hablando solo)
|
|
"""
|
|
|
|
import json
|
|
import re
|
|
import numpy as np
|
|
from pathlib import Path
|
|
|
|
|
|
def analyze_gameplay_activity(
    transcription_file,
    chat_file,
    intro_skip_seconds=455,
    min_region_seconds=20,
):
    """Score each second of a VOD and return regions of active gameplay.

    Two signals are combined into a per-second score:
      * transcription segments -- LoL keywords and action phrases add
        points; champion-select / off-topic chatter subtracts them;
      * chat messages -- keyword hits plus raw message volume.
    The combined score is smoothed and thresholded at the 40th percentile
    of its positive values to extract contiguous gameplay regions.

    Args:
        transcription_file: Whisper-style JSON with a ``segments`` list of
            ``{"start", "end", "text"}`` dicts.
        chat_file: Twitch chat dump JSON with a ``comments`` list whose
            items carry ``content_offset_seconds`` and ``message.body``.
        intro_skip_seconds: Seconds ignored at the start of the VOD
            (stream intro / queue). Defaults to the previously
            hard-coded 455.
        min_region_seconds: Minimum length in seconds for a region to be
            reported. Defaults to the previously hard-coded 20.

    Returns:
        Tuple ``(active_regions, gameplay_smooth)``: a list of
        ``(start_sec, end_sec)`` pairs and the smoothed per-second score
        array (one entry per second of the VOD).
    """
    from scipy.ndimage import uniform_filter1d

    print("=" * 60)
    print("GAMEPLAY ACTIVITY ANALYZER")
    print("=" * 60)

    # Load input data.
    with open(transcription_file, "r", encoding="utf-8") as f:
        trans = json.load(f)

    with open(chat_file, "r", encoding="utf-8") as f:
        chat_data = json.load(f)

    # Keywords that indicate active gameplay. NOTE: every entry must be
    # lowercase because both transcription text and chat messages are
    # lowercased before matching -- the original capitalized champion
    # names (" Warwick", "Diana", ...) could never match.
    gameplay_keywords = [
        "kill", "matan", "muere", "gank", "gankean",
        "teamfight", "fight", "ulti", "ultimate", "flash",
        "ignite", "exhaust", "heal", "baron", "dragón",
        "dragon", "torre", "tower", "inhib", "pentakill",
        "quadra", "triple", "doble", "ace", "jungle",
        "jungla", "adc", "support", "top", "mid",
        "warwick", "diana", "yasuo", "zed", "lee sin",
        "campeón",
    ]

    # One score bucket per second of the video. ``default=0`` guards an
    # empty segment list (the original max() would raise ValueError).
    segments = trans["segments"]
    duration = int(max((seg["end"] for seg in segments), default=0)) + 1
    gameplay_score = np.zeros(duration)

    # 1. Score from the transcription.
    for seg in segments:
        text = seg["text"].lower()
        start = int(seg["start"])
        end = int(seg["end"])

        score = 0
        # Generic gameplay keywords.
        for kw in gameplay_keywords:
            if kw in text:
                score += 2

        # Specific in-game actions weigh more.
        if any(word in text for word in ["me mataron", "me mori", "kill", "mate"]):
            score += 5
        if any(word in text for word in ["ulti", "flash", "ignite"]):
            score += 3
        if any(word in text for word in ["joder", "puta", "mierda", "no puede ser"]):
            score += 2

        # Penalize champion select and off-topic chatter.
        if any(
            word in text for word in ["champions", "selección", "ban", "pick", "elijo"]
        ):
            score -= 10  # Strong penalty for champion select
        if any(
            word in text for word in ["cuento", "historia", "ayer", "mañana", "comida"]
        ):
            score -= 5  # Penalty for random talk

        for i in range(start, min(end + 1, duration)):
            gameplay_score[i] += score

    # 2. Score from chat.
    chat_activity = np.zeros(duration)
    for comment in chat_data["comments"]:
        sec = int(comment["content_offset_seconds"])
        if sec < duration:
            msg = comment["message"]["body"].lower()

            # Chat talking about gameplay.
            for kw in gameplay_keywords:
                if kw in msg:
                    chat_activity[sec] += 1

            # High message volume alone suggests intense gameplay.
            chat_activity[sec] += 0.5

    # Smooth the chat signal and fold it into the combined score.
    chat_smooth = uniform_filter1d(chat_activity, size=5, mode="constant")
    gameplay_score += chat_smooth * 2

    # Smooth the combined score.
    gameplay_smooth = uniform_filter1d(gameplay_score, size=15, mode="constant")

    # Threshold at the 40th percentile of the positive scores. Guard the
    # empty case: the original crashed (IndexError) on a VOD where no
    # second ever scored above zero.
    positive = gameplay_smooth[gameplay_smooth > 0]
    threshold = np.percentile(positive, 40) if positive.size else 0.0
    print(f"Umbral de gameplay: {threshold:.1f}")

    # Extract contiguous above-threshold regions, skipping the intro.
    active_regions = []
    in_gameplay = False
    region_start = 0

    for i in range(intro_skip_seconds, len(gameplay_smooth)):
        if gameplay_smooth[i] > threshold:
            if not in_gameplay:
                region_start = i
                in_gameplay = True
        else:
            if in_gameplay:
                if i - region_start >= min_region_seconds:
                    active_regions.append((region_start, i))
                in_gameplay = False

    # Capture a region still open at the end of the VOD.
    if in_gameplay and len(gameplay_smooth) - region_start >= min_region_seconds:
        active_regions.append((region_start, len(gameplay_smooth)))

    print(f"Regiones de gameplay activo: {len(active_regions)}")

    return active_regions, gameplay_smooth
|
|
|
|
|
|
def filter_rage_moments(rage_moments, gameplay_regions, min_overlap=0.5):
    """Keep only rage moments that coincide with active gameplay.

    Each surviving moment is clipped in place to its best-matching
    gameplay region (with a 5 s lead-in and 10 s lead-out margin) and
    annotated with the overlap length under ``gameplay_overlap``.

    Args:
        rage_moments: List of dicts carrying ``start``/``end`` seconds.
        gameplay_regions: List of ``(start, end)`` active-gameplay spans.
        min_overlap: Fraction of the moment's duration that must fall
            inside a single gameplay region for the moment to survive.

    Returns:
        The filtered (and mutated) subset of ``rage_moments``.
    """
    kept = []

    for moment in rage_moments:
        m_start = moment["start"]
        m_end = moment["end"]
        span = m_end - m_start

        # Find the gameplay region with the largest intersection.
        best_len = 0
        best = None
        for g_start, g_end in gameplay_regions:
            inter = min(m_end, g_end) - max(m_start, g_start)
            if inter > best_len:
                best_len = inter
                best = (g_start, g_end)

        # Discard moments that barely touch any gameplay region.
        if best is None or best_len < span * min_overlap:
            continue

        # Clip to the region, leaving 5 s before and 10 s after it.
        moment["start"] = int(max(m_start, best[0] - 5))
        moment["end"] = int(min(m_end, best[1] + 10))
        moment["gameplay_overlap"] = best_len
        kept.append(moment)

    print(f"Momentos filtrados (solo gameplay): {len(kept)} de {len(rage_moments)}")
    return kept
|
|
|
|
|
|
if __name__ == "__main__":
    # 1. Detect active-gameplay regions from the transcription and chat dump.
    regions, scores = analyze_gameplay_activity(
        "transcripcion_rage.json", "elxokas_chat.json"
    )

    print("\nRegiones de gameplay:")
    # Show the first 10 regions as MM:SS ranges with their duration.
    for i, (s, e) in enumerate(regions[:10], 1):
        mins_s, secs_s = divmod(s, 60)
        mins_e, secs_e = divmod(e, 60)
        dur = e - s
        print(f"{i}. {mins_s:02d}:{secs_s:02d} - {mins_e:02d}:{secs_e:02d} ({dur}s)")

    # Persist the regions so downstream clipping scripts can reuse them.
    with open("gameplay_regions.json", "w", encoding="utf-8") as f:
        json.dump(regions, f)

    # The original used an f-string with no placeholders here.
    print("\nGuardado en gameplay_regions.json")
|