# --- Change log (from commit message) -------------------------------------
# Problem: the system was stuck using a single all_tracks folder, causing:
#   - no bucket-sampling diversity (all files in one folder)
#   - repetitive sample selection
#   - no coherence between sections
#   - fades/volumes not auto-applied
# Fixes:
#   1. Changed DEFAULT_LIBRARY from all_tracks to organized_samples
#      (server.py SAMPLES_DIR, sample_manager.py base_dir, health_check.py
#      primary paths).
#   2. The organized_samples structure enables T013 bucket sampling:
#      loops/bass (34), loops/synth (43), loops/vocal (24), oneshots/kick (20),
#      oneshots/perc (35) - each subfolder small enough for bucket sampling.
#   3. Added auto-automation to generate_song(): 4-bar intro fade-in for
#      kick/bass/hat, build curve 0.5 -> 0.9 (bars 32-40), reverb automation
#      0% -> 40% -> 0% on atmos/pad/vocal, apply_automation parameter
#      (default True).
#   4. Each track now draws diverse samples from different subfolders
#      (bass from loops/bass, synth from loops/synth, drums from oneshots/).
#      The coverage wheel now tracks usage across 20+ subfolders instead of 1,
#      and diversity memory tracks families per folder correctly.
# Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
"""
AbletonMCP AI Server - MCP server for music generation

Integrates FastMCP with Ableton Live 12.

To run:
    python -m AbletonMCP_AI.MCP_Server.server

Or with uv:
    uv run python -m AbletonMCP_AI.MCP_Server.server
"""

# FIX: this import previously ran *before* the module docstring, silently
# demoting the docstring to a no-op string expression (it was not __doc__).
# NOTE(review): it also runs before the sys.path setup further down —
# presumably human_feel resolves from the default path; confirm, or move it
# into the guarded import section with the other project modules.
from human_feel import HumanFeelEngine
|
|
|
|
from mcp.server.fastmcp import FastMCP, Context
|
|
import socket
|
|
import json
|
|
import logging
|
|
import os
|
|
import random
|
|
import shutil
|
|
import sys
|
|
import time
|
|
import ctypes
|
|
from dataclasses import dataclass
|
|
from collections import deque
|
|
from contextlib import asynccontextmanager
|
|
from typing import AsyncIterator, Dict, Any, List, Optional, Set, Tuple, Union
|
|
from pathlib import Path
|
|
|
|
# Add paths so the package can be imported both directly and as a package.
# FIX: Use absolute path to ensure correct resolution regardless of execution location
PROGRAM_DATA_DIR = Path("C:/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts")
SERVER_DIR = PROGRAM_DATA_DIR / "AbletonMCP_AI" / "MCP_Server"
PACKAGE_DIR = PROGRAM_DATA_DIR / "AbletonMCP_AI"
# Prepend (insert at 0) so these directories take priority over any
# same-named modules elsewhere on sys.path; skip paths already present.
for import_path in (str(SERVER_DIR), str(PACKAGE_DIR)):
    if import_path not in sys.path:
        sys.path.insert(0, import_path)
|
|
|
|
try:
|
|
from song_generator import SongGenerator, StyleConfig
|
|
from sample_index import SampleIndex
|
|
from reference_listener import ReferenceAudioListener
|
|
from audio_resampler import AudioResampler
|
|
except ImportError:
|
|
# Fallback si no están disponibles
|
|
SongGenerator = None
|
|
SampleIndex = None
|
|
ReferenceAudioListener = None
|
|
AudioResampler = None
|
|
|
|
# FASE 2.C/D/E: Fingerprint y Wild Card
|
|
try:
|
|
from audio_fingerprint import (
|
|
get_fingerprint_db, get_family_tracker,
|
|
WildCardMatcher, SectionCastingEngine
|
|
)
|
|
except ImportError:
|
|
get_fingerprint_db = None
|
|
get_family_tracker = None
|
|
WildCardMatcher = None
|
|
SectionCastingEngine = None
|
|
|
|
# FASE 7: Self-AI
|
|
from self_ai import AutoPrompter, CritiqueEngine, AutoFixEngine
|
|
|
|
# FASE 4: Soundscape
|
|
from audio_soundscape import SoundscapeEngine, FXEngine, TonalAnalyzer
|
|
|
|
# FASE 4: Key Compatibility Matrix (T051-T062)
|
|
from audio_key_compatibility import (
|
|
KeyCompatibilityMatrix,
|
|
get_key_matrix, get_tonal_analyzer
|
|
)
|
|
|
|
# FASE 5: Arrangement
|
|
from audio_arrangement import DJArrangementEngine, TransitionEngine
|
|
|
|
# FASE 6: Mastering
|
|
from audio_mastering import MasterChain, LoudnessAnalyzer, QASuite, MasteringPreset
|
|
|
|
# T101-T104: Bus Routing Fix
|
|
try:
|
|
from bus_routing_fix import get_routing_fixer, BusRoutingRules
|
|
except ImportError:
|
|
get_routing_fixer = None
|
|
BusRoutingRules = None
|
|
|
|
# T105-T106: Validation System Fix
|
|
try:
|
|
from validation_system_fix import get_validation_fixer, ValidationIssue
|
|
except ImportError:
|
|
get_validation_fixer = None
|
|
ValidationIssue = None
|
|
|
|
# Logging configuration — `logger` is the module-wide logger used by every
# helper below; basicConfig is a no-op if the host app configured logging first.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("AbletonMCP-AI")
|
|
|
|
# ============================================================================
|
|
# ERROR HANDLING INFRASTRUCTURE
|
|
# ============================================================================
|
|
|
|
class MCPError(Exception):
    """Root of the MCP tool exception hierarchy.

    Carries a human-readable message, a machine-readable error code and an
    optional details mapping, and knows how to render itself as the
    structured ``[ERROR:CODE] message`` string MCP clients expect.
    """

    def __init__(self, message: str, error_code: str = "GENERAL_ERROR", details: Optional[Dict[str, Any]] = None):
        super().__init__(message)
        self.message = message
        self.error_code = error_code
        self.details = details or {}

    def to_response(self) -> str:
        """Render this error as a structured string for MCP clients."""
        return "[ERROR:{}] {}".format(self.error_code, self.message)
|
|
|
|
|
|
class ConnectionError(MCPError):
    """Raised when the Ableton Live bridge cannot be reached.

    NOTE(review): this class shadows the builtin ``ConnectionError`` within
    this module; code here that catches ``ConnectionError`` catches this
    class, not the builtin. Renaming would break existing callers.
    """

    def __init__(self, message: str = "Cannot connect to Ableton Live", details: Optional[Dict[str, Any]] = None):
        super().__init__(message, "CONNECTION_ERROR", details)
|
|
|
|
|
|
class ValidationError(MCPError):
    """Raised when a tool parameter fails validation.

    Records the offending parameter name, the value received and a short
    description of what was expected.
    """

    def __init__(self, param_name: str, value: Any, expected: str, details: Optional[Dict[str, Any]] = None):
        description = "Invalid parameter '{}': got '{}', expected {}".format(param_name, value, expected)
        super().__init__(description, "VALIDATION_ERROR", details)
        self.param_name = param_name
        self.value = value
        self.expected = expected
|
|
|
|
|
|
class TimeoutError(MCPError):
    """Raised when an operation exceeds its time budget.

    NOTE(review): shadows the builtin ``TimeoutError`` inside this module;
    renaming would break existing callers, so the shadowing is kept.
    """

    def __init__(self, operation: str, timeout_seconds: float, details: Optional[Dict[str, Any]] = None):
        super().__init__(
            f"Operation '{operation}' timed out after {timeout_seconds}s",
            "TIMEOUT_ERROR",
            details,
        )
        self.operation = operation
        self.timeout_seconds = timeout_seconds
|
|
|
|
|
|
class DependencyError(MCPError):
    """Raised when an optional module required by a tool is unavailable."""

    def __init__(self, module_name: str, details: Optional[Dict[str, Any]] = None):
        super().__init__(
            f"Required module '{module_name}' is not available",
            "DEPENDENCY_ERROR",
            details,
        )
        self.module_name = module_name
|
|
|
|
|
|
class AbletonResponseError(MCPError):
    """Raised when Ableton replies to a command with an error payload.

    The message is taken from the response's ``message`` field when present,
    otherwise a generic per-command fallback is used.
    """

    def __init__(self, command: str, response: Dict[str, Any], details: Optional[Dict[str, Any]] = None):
        fallback = f"Ableton error for command '{command}'"
        super().__init__(response.get("message", fallback), "ABLETON_ERROR", details)
        self.command = command
        self.response = response
|
|
|
|
|
|
def _log_error(error: Exception, context: str = "", include_traceback: bool = True) -> None:
    """Log *error* at ERROR level, optionally prefixed with *context*.

    When debug logging is enabled and *include_traceback* is True, the
    current traceback is also emitted at DEBUG level.
    """
    prefix = f"[{context}] " if context else ""
    logger.error(f"{prefix}{type(error).__name__}: {error}")

    if include_traceback and logger.isEnabledFor(logging.DEBUG):
        import traceback
        logger.debug(traceback.format_exc())
|
|
|
|
|
|
def _validate_range(value: Any, name: str, min_val: float, max_val: float) -> float:
|
|
"""Validate that a value is within a range."""
|
|
try:
|
|
num_val = float(value)
|
|
except (TypeError, ValueError):
|
|
raise ValidationError(name, value, f"number between {min_val} and {max_val}")
|
|
|
|
if not min_val <= num_val <= max_val:
|
|
raise ValidationError(name, value, f"number between {min_val} and {max_val}")
|
|
|
|
return num_val
|
|
|
|
|
|
def _linear_to_live_slider(linear_vol: float) -> float:
|
|
"""
|
|
Convierte una amplitud lineal (0.0 - 1.0) al valor de slider de Ableton (0.0 - 1.0).
|
|
En la API de Ableton, un valor de slider de 0.85 equivale a 0 dB.
|
|
|
|
Los valores en ROLE_GAIN_CALIBRATION ya estan calibrados donde kick=0.85 es el ancla.
|
|
Solo aplicamos la curva de potencia (sqrt) para la percepcion logaritmica del volumen.
|
|
No multiplicamos por 0.85 porque los valores de configuracion ya estan en la escala correcta.
|
|
"""
|
|
if linear_vol <= 0.001:
|
|
return 0.0
|
|
clamped = max(0.0, min(1.0, linear_vol))
|
|
return round(clamped ** 0.5, 3)
|
|
|
|
def _linear_to_live_slider_bus(linear_vol: float) -> float:
|
|
"""
|
|
Similar a slider normal, pero sin el factor de atenuacion de 0.85,
|
|
ideado especificamente para compensar el headroom de los Buses RCA.
|
|
"""
|
|
if linear_vol <= 0.001:
|
|
return 0.0
|
|
clamped = max(0.0, min(1.0, linear_vol))
|
|
return round(clamped ** 0.5, 3)
|
|
|
|
|
|
def _validate_int(value: Any, name: str, min_val: int = None, max_val: int = None) -> int:
|
|
"""Validate that a value is an integer within optional bounds."""
|
|
try:
|
|
int_val = int(value)
|
|
except (TypeError, ValueError):
|
|
raise ValidationError(name, value, "integer")
|
|
|
|
if min_val is not None and int_val < min_val:
|
|
raise ValidationError(name, value, f"integer >= {min_val}")
|
|
if max_val is not None and int_val > max_val:
|
|
raise ValidationError(name, value, f"integer <= {max_val}")
|
|
|
|
return int_val
|
|
|
|
|
|
def _validate_string(value: Any, name: str, allow_empty: bool = False) -> str:
|
|
"""Validate that a value is a string."""
|
|
if value is None:
|
|
if allow_empty:
|
|
return ""
|
|
raise ValidationError(name, value, "non-empty string")
|
|
|
|
str_val = str(value).strip()
|
|
if not allow_empty and not str_val:
|
|
raise ValidationError(name, value, "non-empty string")
|
|
|
|
return str_val
|
|
|
|
|
|
def _validate_json(value: Any, name: str) -> Any:
|
|
"""Validate and parse a JSON string."""
|
|
if isinstance(value, (dict, list)):
|
|
return value
|
|
|
|
try:
|
|
return json.loads(str(value))
|
|
except json.JSONDecodeError as e:
|
|
raise ValidationError(name, value, f"valid JSON: {e}")
|
|
|
|
|
|
def _handle_tool_error(error: Exception, operation: str = "") -> str:
    """Log *error* and convert it into a user-facing MCP error string.

    MCPError subclasses render themselves via to_response(); anything else
    falls back to a generic [ERROR:GENERAL_ERROR] message.
    """
    _log_error(error, context=operation)

    if isinstance(error, MCPError):
        return error.to_response()

    return f"[ERROR:GENERAL_ERROR] {operation}: {error}"
|
|
|
|
# ============================================================================
|
|
# GENERATION MANIFEST STORAGE
|
|
# ============================================================================
|
|
|
|
# Manifest de la última generación
|
|
_last_generation_manifest: Dict[str, Any] = {}
|
|
|
|
def _store_generation_manifest(manifest: Dict[str, Any]) -> None:
|
|
"""Almacena el manifest de la generación actual."""
|
|
global _last_generation_manifest
|
|
_last_generation_manifest = manifest.copy()
|
|
logger.debug("Stored generation manifest with %d keys", len(manifest))
|
|
|
|
def _get_stored_manifest() -> Dict[str, Any]:
|
|
"""Retorna el manifest de la última generación."""
|
|
return _last_generation_manifest.copy()
|
|
|
|
def _build_transition_event_summary(config: Dict[str, Any]) -> Dict[str, Any]:
|
|
"""
|
|
Build summary of transition events from config.
|
|
|
|
Returns dict with:
|
|
- total_events: int
|
|
- event_types: list of unique fill types used
|
|
- count_by_type: dict of fill type -> count
|
|
- track_roles: list of roles that received transition material
|
|
- note_count: total number of notes across all events
|
|
"""
|
|
transition_events = config.get('transition_events', [])
|
|
|
|
if not transition_events:
|
|
return {
|
|
'total_events': 0,
|
|
'event_types': [],
|
|
'count_by_type': {},
|
|
'track_roles': [],
|
|
'note_count': 0
|
|
}
|
|
|
|
# Count by fill type
|
|
count_by_type: Dict[str, int] = {}
|
|
track_roles: set = set()
|
|
total_notes = 0
|
|
|
|
for event in transition_events:
|
|
fill_name = event.get('fill', 'unknown')
|
|
count_by_type[fill_name] = count_by_type.get(fill_name, 0) + 1
|
|
|
|
# Track roles that received material
|
|
if 'materialized_track_roles' in event:
|
|
roles = event.get('materialized_track_roles', [])
|
|
else:
|
|
roles = event.get('roles', [])
|
|
if isinstance(roles, list):
|
|
track_roles.update(roles)
|
|
|
|
# Count notes if available
|
|
notes_count = event.get('materialized_notes_count', event.get('notes_count', 0))
|
|
if isinstance(notes_count, (int, float)):
|
|
total_notes += int(notes_count)
|
|
|
|
return {
|
|
'total_events': len(transition_events),
|
|
'event_types': list(count_by_type.keys()),
|
|
'count_by_type': count_by_type,
|
|
'track_roles': sorted(list(track_roles)),
|
|
'note_count': total_notes,
|
|
'materialized': bool(config.get('transition_materialization', {}).get('materialized', total_notes > 0)),
|
|
}
|
|
|
|
# Importar nuevo sistema de samples
|
|
try:
|
|
from .sample_manager import SampleManager, get_manager as get_sample_manager
|
|
from .sample_selector import (
|
|
SampleSelector,
|
|
get_selector,
|
|
select_samples_for_track,
|
|
get_drum_kit,
|
|
reset_cross_generation_memory,
|
|
)
|
|
from .audio_analyzer import analyze_sample, AudioAnalyzer
|
|
sample_manager_factory = get_sample_manager
|
|
SAMPLE_SYSTEM_AVAILABLE = True
|
|
except ImportError:
|
|
try:
|
|
from sample_manager import SampleManager, get_manager as get_sample_manager
|
|
from sample_selector import (
|
|
SampleSelector,
|
|
get_selector,
|
|
select_samples_for_track,
|
|
get_drum_kit,
|
|
reset_cross_generation_memory,
|
|
)
|
|
from audio_analyzer import analyze_sample, AudioAnalyzer
|
|
sample_manager_factory = get_sample_manager
|
|
SAMPLE_SYSTEM_AVAILABLE = True
|
|
except ImportError as e2:
|
|
logger.warning(f"Sistema de samples no disponible: {e2}")
|
|
SampleManager = None
|
|
SampleSelector = None
|
|
AudioAnalyzer = None
|
|
analyze_sample = None
|
|
get_selector = None
|
|
select_samples_for_track = None
|
|
get_drum_kit = None
|
|
reset_cross_generation_memory = None
|
|
sample_manager_factory = None
|
|
SAMPLE_SYSTEM_AVAILABLE = False
|
|
|
|
|
|
# Importar sistema de role matching (Phase 4)
|
|
try:
|
|
from .role_matcher import (
|
|
validate_role_for_sample,
|
|
log_matching_decision,
|
|
enhance_sample_matching,
|
|
resolve_role_from_alias,
|
|
get_bus_for_role,
|
|
filter_aggressive_samples,
|
|
create_enhanced_match_report,
|
|
get_role_info,
|
|
VALID_ROLES,
|
|
ROLE_ALIASES,
|
|
ROLE_SCORE_THRESHOLDS,
|
|
AGGRESSIVE_KEYWORDS,
|
|
GENRE_APPROPRIATE_AGGRESSIVE,
|
|
)
|
|
ROLE_MATCHER_AVAILABLE = True
|
|
except ImportError:
|
|
try:
|
|
from role_matcher import (
|
|
validate_role_for_sample,
|
|
log_matching_decision,
|
|
enhance_sample_matching,
|
|
resolve_role_from_alias,
|
|
get_bus_for_role,
|
|
filter_aggressive_samples,
|
|
create_enhanced_match_report,
|
|
get_role_info,
|
|
VALID_ROLES,
|
|
ROLE_ALIASES,
|
|
ROLE_SCORE_THRESHOLDS,
|
|
AGGRESSIVE_KEYWORDS,
|
|
GENRE_APPROPRIATE_AGGRESSIVE,
|
|
)
|
|
ROLE_MATCHER_AVAILABLE = True
|
|
except ImportError as e2:
|
|
logger.warning(f"Role matcher no disponible: {e2}")
|
|
validate_role_for_sample = None
|
|
log_matching_decision = None
|
|
enhance_sample_matching = None
|
|
resolve_role_from_alias = None
|
|
get_bus_for_role = None
|
|
filter_aggressive_samples = None
|
|
create_enhanced_match_report = None
|
|
get_role_info = None
|
|
VALID_ROLES = {}
|
|
ROLE_ALIASES = {}
|
|
ROLE_SCORE_THRESHOLDS = {}
|
|
AGGRESSIVE_KEYWORDS = set()
|
|
GENRE_APPROPRIATE_AGGRESSIVE = set()
|
|
ROLE_MATCHER_AVAILABLE = False
|
|
|
|
# Constantes
|
|
DEFAULT_PORT = 9877
|
|
HOST = "127.0.0.1"
|
|
PROJECT_SAMPLES_DIR = PACKAGE_DIR.parent / "librerias" / "organized_samples"
|
|
SAMPLES_DIR = str(PROJECT_SAMPLES_DIR)
|
|
MESSAGE_TERMINATOR = b"\n"
|
|
M4L_SAMPLER_PORT = 9879
|
|
M4L_DEVICE_NAME = "AbletonMCP_SamplerPro"
|
|
USER_LIBRARY_DIR = Path.home() / "Documents" / "Ableton" / "User Library"
|
|
M4L_MAX_AUDIO_EFFECT_DIR = USER_LIBRARY_DIR / "Presets" / "Audio Effects" / "Max Audio Effect"
|
|
PROJECT_M4L_DIR = PACKAGE_DIR / "MaxForLive"
|
|
PROJECT_M4L_SAMPLER_DEVICE = PROJECT_M4L_DIR / f"{M4L_DEVICE_NAME}.amxd"
|
|
INSTALLED_M4L_SAMPLER_DEVICE = M4L_MAX_AUDIO_EFFECT_DIR / f"{M4L_DEVICE_NAME}.amxd"
|
|
ABLETON_RESOURCES_DIR = PACKAGE_DIR.parent.parent
|
|
FACTORY_M4L_MAX_AUDIO_EFFECT_DIR = (
|
|
ABLETON_RESOURCES_DIR / "Max" / "resources" / "packages" / "Max for Live" / "patchers" / "Max Audio Effect"
|
|
)
|
|
FACTORY_M4L_SAMPLER_DEVICE = FACTORY_M4L_MAX_AUDIO_EFFECT_DIR / f"{M4L_DEVICE_NAME}.amxd"
|
|
HYBRID_DRUM_TRACK_NAME = "HYBRID DRUMS"
|
|
HYBRID_DRUM_TRACK_COLOR = 20
|
|
AUDIO_FALLBACK_TRACK_SPECS = (
|
|
("AUDIO KICK", "kick", 10, 0.9),
|
|
("AUDIO CLAP", "snare", 45, 0.78),
|
|
("AUDIO HAT", "hat", 5, 0.64),
|
|
("AUDIO BASS", "bass", 30, 0.82),
|
|
)
|
|
AUDIO_OPTIONAL_FALLBACK_TRACK_SPECS = (
|
|
("AUDIO PERC MAIN", "perc_loop", 20, 0.68),
|
|
("AUDIO PERC ALT", "perc_alt", 22, 0.62),
|
|
("AUDIO TOP LOOP", "top_loop", 24, 0.54),
|
|
("AUDIO SYNTH LOOP", "synth_loop", 50, 0.52),
|
|
("AUDIO SYNTH PEAK", "synth_peak", 52, 0.5),
|
|
("AUDIO VOCAL LOOP", "vocal_loop", 40, 0.62),
|
|
("AUDIO VOCAL BUILD", "vocal_build", 42, 0.58),
|
|
("AUDIO VOCAL PEAK", "vocal_peak", 43, 0.6),
|
|
("AUDIO CRASH FX", "crash_fx", 26, 0.46),
|
|
("AUDIO TRANSITION FILL", "fill_fx", 28, 0.52),
|
|
("AUDIO SNARE ROLL", "snare_roll", 27, 0.5),
|
|
("AUDIO ATMOS", "atmos_fx", 54, 0.44),
|
|
("AUDIO VOCAL SHOT", "vocal_shot", 41, 0.52),
|
|
)
|
|
REFERENCE_AUDIO_MUTE_MAP = {
|
|
"AUDIO KICK": ("KICK",),
|
|
"AUDIO CLAP": ("CLAP",),
|
|
"AUDIO HAT": ("HAT CLOSED", "HAT OPEN", "TOP LOOP"),
|
|
"AUDIO BASS LOOP": ("BASS", "SUB BASS"),
|
|
"AUDIO PERC MAIN": ("PERC", "PERCUSSION"),
|
|
"AUDIO PERC ALT": ("RIDE",),
|
|
"AUDIO TOP LOOP": ("TOP LOOP", "HAT OPEN", "PERCUSSION"),
|
|
"AUDIO SYNTH LOOP": ("STAB", "COUNTER", "PLUCK", "ARP"),
|
|
"AUDIO SYNTH PEAK": ("LEAD", "STAB", "COUNTER", "PLUCK", "CHORDS", "ARP"),
|
|
"AUDIO VOCAL LOOP": ("VOCAL", "VOCAL CHOP"),
|
|
"AUDIO VOCAL BUILD": ("VOCAL", "VOCAL CHOP", "ATMOS"),
|
|
"AUDIO VOCAL PEAK": ("VOCAL", "VOCAL CHOP", "LEAD"),
|
|
"AUDIO CRASH FX": ("CRASH", "IMPACT FX"),
|
|
"AUDIO TRANSITION FILL": ("TOM FILL", "SNARE FILL", "REVERSE FX"),
|
|
"AUDIO SNARE ROLL": ("SNARE FILL", "RISER FX"),
|
|
"AUDIO ATMOS": ("ATMOS", "DRONE", "PAD"),
|
|
"AUDIO VOCAL SHOT": ("VOCAL", "VOCAL CHOP", "COUNTER"),
|
|
"AUDIO RESAMPLE REVERSE FX": ("REVERSE FX", "RISER FX", "IMPACT FX"),
|
|
"AUDIO RESAMPLE RISER": ("RISER FX", "REVERSE FX", "ATMOS"),
|
|
"AUDIO RESAMPLE DOWNLIFTER": ("ATMOS", "REVERSE FX", "IMPACT FX"),
|
|
"AUDIO RESAMPLE STUTTER": ("VOCAL", "VOCAL CHOP", "COUNTER"),
|
|
}
|
|
|
|
AUDIO_TRACK_BUS_KEYS = {
|
|
"AUDIO KICK": "drums",
|
|
"AUDIO CLAP": "drums",
|
|
"AUDIO HAT": "drums",
|
|
"AUDIO PERC": "drums",
|
|
"AUDIO PERC MAIN": "drums",
|
|
"AUDIO PERC ALT": "drums",
|
|
"AUDIO TOP LOOP": "drums",
|
|
"AUDIO CRASH FX": "drums",
|
|
"AUDIO TRANSITION FILL": "drums",
|
|
"AUDIO SNARE ROLL": "drums",
|
|
"AUDIO BASS": "bass",
|
|
"AUDIO BASS LOOP": "bass",
|
|
"AUDIO SYNTH LOOP": "music",
|
|
"AUDIO SYNTH PEAK": "music",
|
|
"AUDIO VOCAL": "vocal",
|
|
"AUDIO VOCAL LOOP": "vocal",
|
|
"AUDIO VOCAL BUILD": "vocal",
|
|
"AUDIO VOCAL PEAK": "vocal",
|
|
"AUDIO VOCAL SHOT": "vocal",
|
|
"AUDIO ATMOS": "fx",
|
|
"AUDIO RESAMPLE REVERSE FX": "fx",
|
|
"AUDIO RESAMPLE RISER": "fx",
|
|
"AUDIO RESAMPLE DOWNLIFTER": "fx",
|
|
"AUDIO RESAMPLE STUTTER": "vocal",
|
|
HYBRID_DRUM_TRACK_NAME.upper(): "drums",
|
|
}
|
|
|
|
BUS_ROUTING_MAP = {
|
|
"kick": {"drums"},
|
|
"snare": {"drums"},
|
|
"clap": {"drums"},
|
|
"hat": {"drums"},
|
|
"perc": {"drums"},
|
|
"ride": {"drums"},
|
|
"tom": {"drums"},
|
|
"crash": {"drums", "fx"},
|
|
"sub_bass": {"bass"},
|
|
"bass": {"bass"},
|
|
"chords": {"music"},
|
|
"pad": {"music"},
|
|
"pluck": {"music"},
|
|
"lead": {"music"},
|
|
"arp": {"music"},
|
|
"drone": {"music"},
|
|
"stab": {"music"},
|
|
"counter": {"music"},
|
|
"vocal": {"vocal"},
|
|
"vocal_chop": {"vocal"},
|
|
"reverse_fx": {"fx"},
|
|
"riser": {"fx"},
|
|
"impact": {"fx"},
|
|
"atmos": {"fx"},
|
|
}
|
|
|
|
COMMAND_TIMEOUTS = {
|
|
"reset": 30.0,
|
|
"generate_complete_song": 180.0,
|
|
"create_arrangement_audio_pattern": 45.0,
|
|
"load_device": 45.0,
|
|
}
|
|
_RECENT_LIBRARY_MATCHES = deque(maxlen=32)
|
|
|
|
# T014: Sistema de sample history persistente
|
|
SAMPLE_HISTORY_PATH = Path.home() / ".abletonmcp_ai" / "sample_history.json"
|
|
_sample_usage_history: Dict[str, Dict[str, Any]] = {}
|
|
|
|
# T029: Coverage Wheel - Seguimiento de uso por carpeta
|
|
COVERAGE_WHEEL_PATH = Path.home() / ".abletonmcp_ai" / "collection_coverage.json"
|
|
_coverage_wheel: Dict[str, Dict[str, Any]] = {}
|
|
|
|
def _load_sample_history() -> Dict[str, Dict[str, Any]]:
    """T014: Load the persisted per-sample usage history from disk.

    Falls back to an empty history on any read/parse error.
    """
    global _sample_usage_history
    try:
        if SAMPLE_HISTORY_PATH.exists():
            with open(SAMPLE_HISTORY_PATH, 'r', encoding='utf-8') as fh:
                _sample_usage_history = json.load(fh)
            logger.info(f"✓ Sample history cargado: {len(_sample_usage_history)} samples")
        else:
            _sample_usage_history = {}
            logger.info("Sample history inicializado (vacío)")
    except Exception as exc:
        logger.warning(f"⚠ Error cargando sample history: {exc}")
        _sample_usage_history = {}
    return _sample_usage_history
|
|
|
|
def _save_sample_history() -> None:
    """T014: Persist the per-sample usage history to disk (best-effort)."""
    try:
        SAMPLE_HISTORY_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(SAMPLE_HISTORY_PATH, 'w', encoding='utf-8') as fh:
            json.dump(_sample_usage_history, fh, indent=2)
        logger.debug(f"Sample history guardado: {len(_sample_usage_history)} samples")
    except Exception as exc:
        logger.warning(f"⚠ Error guardando sample history: {exc}")
|
|
|
|
def _load_coverage_wheel() -> Dict[str, Dict[str, Any]]:
    """T029: Load the per-folder Coverage Wheel stats from disk.

    Falls back to an empty wheel on any read/parse error.
    """
    global _coverage_wheel
    try:
        if COVERAGE_WHEEL_PATH.exists():
            with open(COVERAGE_WHEEL_PATH, 'r', encoding='utf-8') as fh:
                _coverage_wheel = json.load(fh)
            logger.info(f"✓ Coverage Wheel cargado: {len(_coverage_wheel)} carpetas")
        else:
            _coverage_wheel = {}
            logger.info("Coverage Wheel inicializado (vacío)")
    except Exception as exc:
        logger.warning(f"⚠ Error cargando Coverage Wheel: {exc}")
        _coverage_wheel = {}
    return _coverage_wheel
|
|
|
|
def _save_coverage_wheel() -> None:
    """T029: Persist the Coverage Wheel stats to disk (best-effort)."""
    try:
        COVERAGE_WHEEL_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(COVERAGE_WHEEL_PATH, 'w', encoding='utf-8') as fh:
            json.dump(_coverage_wheel, fh, indent=2)
        logger.debug(f"Coverage Wheel guardado: {len(_coverage_wheel)} carpetas")
    except Exception as exc:
        logger.warning(f"⚠ Error guardando Coverage Wheel: {exc}")
|
|
|
|
def _update_sample_usage(sample_path: str, role: str) -> None:
    """T014: Record one use of *sample_path* in *role*.

    Also feeds the T030 Coverage Wheel with per-folder usage statistics
    (the wheel dict is mutated in place; callers persist separately).
    """
    global _sample_usage_history

    role_stats = _sample_usage_history.setdefault(sample_path, {}).setdefault(
        role, {"uses": 0, "last_used": None}
    )
    role_stats["uses"] += 1
    role_stats["last_used"] = time.time()

    # T030: per-folder coverage tracking
    folder = str(Path(sample_path).parent)
    folder_stats = _coverage_wheel.setdefault(
        folder, {"uses": 0, "last_used": None, "samples": [], "generation_history": []}
    )

    if sample_path not in folder_stats["samples"]:
        folder_stats["samples"].append(sample_path)

    folder_stats["uses"] += 1
    folder_stats["last_used"] = time.time()
|
|
|
|
# T025-T028: PALETTE LOCK SYSTEM
|
|
_current_palette: Dict[str, str] = {} # {drums: folder, bass: folder, music: folder}
|
|
_palette_lock_override: Optional[Dict[str, str]] = None # Para set_palette_lock()
|
|
|
|
def _select_anchor_folders(genre: str, key: str, bpm: float) -> Dict[str, str]:
    """
    T025: Select one anchor folder per bus at the start of each generation.

    Uses weighted random sampling by freshness
    (freshness = max(0, 10 - uses + recency_boost)).
    Maps buses: drums_anchor, bass_anchor, music_anchor.

    NOTE(review): genre/key/bpm are currently unused — candidates are chosen
    purely from filename patterns and coverage freshness; confirm intent.

    Returns: {"drums": path, "bass": path, "music": path}
    """
    global _current_palette, _palette_lock_override

    # A manual override from set_palette_lock() wins outright.
    if _palette_lock_override:
        logger.info(f"🎨 Usando palette lock manual: {_palette_lock_override}")
        _current_palette = _palette_lock_override.copy()
        return _current_palette

    # Filename glob patterns used to locate candidate folders per bus.
    bus_patterns = {
        "drums": ["*Kick*.wav", "*Drum*.wav", "*Perc*.wav", "*Loop*Drum*.wav"],
        "bass": ["*Bass*.wav", "*Sub*.wav", "*808*.wav", "*Bassline*.wav"],
        "music": ["*Synth*.wav", "*Chord*.wav", "*Pad*.wav", "*Lead*.wav", "*Arp*.wav"]
    }

    selected_anchors = {}
    rng = random.Random(int(time.time()))  # fresh seed per generation

    for bus, patterns in bus_patterns.items():
        # Gather up to 20 candidate folders containing matching samples.
        candidate_folders = _find_candidate_folders(patterns, limit=20)

        if not candidate_folders:
            logger.warning(f"⚠ No se encontraron carpetas para {bus}")
            continue

        # T031: compute a freshness weight for each candidate folder.
        folder_weights = []
        for folder in candidate_folders:
            uses = _coverage_wheel.get(folder, {}).get("uses", 0)
            last_used = _coverage_wheel.get(folder, {}).get("last_used", 0)

            # Freshness approximates max(0, 10 - uses in the last ~10
            # generations): total uses tempered by how long the folder rested.
            hours_since_use = (time.time() - last_used) / 3600 if last_used else 999
            recency_boost = min(5, hours_since_use / 24)  # bonus per day unused

            freshness = max(0, 10 - uses + recency_boost)
            weight = max(1.0, freshness)  # floor keeps every folder reachable
            folder_weights.append((folder, weight))

        # Weighted random pick proportional to freshness.
        total_weight = sum(w for _, w in folder_weights)
        if total_weight == 0:
            selected = candidate_folders[0]
        else:
            pick = rng.uniform(0, total_weight)
            current = 0
            for folder, weight in folder_weights:
                current += weight
                if pick <= current:
                    selected = folder
                    break
            else:
                # Float rounding can leave pick past the last cumulative sum.
                selected = candidate_folders[-1]

        selected_anchors[bus] = selected
        logger.info(f"🎨 Anchor {bus}: {Path(selected).name} (frescura calculada)")

    _current_palette = selected_anchors
    return selected_anchors
|
|
|
|
def _find_candidate_folders(patterns: List[str], limit: int = 20) -> List[str]:
    """Find folders containing samples whose filenames match any glob pattern.

    FIX: the previous check stripped the '*' wildcards and did a substring
    test (e.g. '*Kick*.wav' -> 'kick.wav'), which only matched filenames
    literally containing 'kick.wav' and never matched names like
    'Kick_01.wav'.  Proper case-insensitive glob matching is used instead.

    Returns up to *limit* distinct parent-folder paths (unordered).
    """
    import fnmatch  # local import keeps this fix self-contained

    folders = set()
    try:
        sample_manager = get_sample_manager()
        if not sample_manager:
            return []

        # Lowercase both sides once; fnmatchcase is then platform-independent.
        lowered_patterns = [p.lower() for p in patterns]
        for sample_path in sample_manager.samples.keys():
            path = Path(sample_path)
            name = path.name.lower()
            if any(fnmatch.fnmatchcase(name, pat) for pat in lowered_patterns):
                folders.add(str(path.parent))
                if len(folders) >= limit:
                    break
    except Exception as e:
        logger.warning(f"Error buscando carpetas: {e}")

    return list(folders)
|
|
|
|
def _is_compatible_folder(sample_path: str, anchor_folder: str) -> bool:
|
|
"""
|
|
Determina si un sample pertenece a una carpeta compatible con el ancla.
|
|
"""
|
|
sample_folder = str(Path(sample_path).parent)
|
|
|
|
# Misma carpeta = perfect match
|
|
if sample_folder == anchor_folder:
|
|
return True
|
|
|
|
# Subcarpeta de ancla
|
|
if sample_folder.startswith(anchor_folder):
|
|
return True
|
|
|
|
# Carpetas hermanas (mismo nivel)
|
|
if Path(sample_folder).parent == Path(anchor_folder).parent:
|
|
return True
|
|
|
|
return False
|
|
|
|
def _get_palette_bonus(sample_path: str, bus: str) -> float:
    """
    T026: Score multiplier based on the locked palette for *bus*.

      - exact anchor folder:  1.4x
      - compatible folder:    1.2x
      - unrelated folder:     0.9x
      - no anchor for bus:    1.0x (neutral)
    """
    global _current_palette

    anchor = _current_palette.get(bus)
    if not anchor:
        return 1.0

    if str(Path(sample_path).parent) == anchor:
        return 1.4

    if _is_compatible_folder(sample_path, anchor):
        return 1.2

    return 0.9
|
|
|
|
def _get_current_palette() -> Dict[str, str]:
    """Return a defensive copy of the active bus->anchor-folder palette."""
    return dict(_current_palette)
|
|
|
|
# T021: Sistema de fatiga persistente
|
|
SAMPLE_FATIGUE_PATH = Path.home() / ".abletonmcp_ai" / "sample_fatigue.json"
|
|
_sample_fatigue: Dict[str, Dict[str, Any]] = {}
|
|
|
|
def _load_sample_fatigue() -> Dict[str, Dict[str, Any]]:
    """T021: Load persisted per-sample fatigue counters from disk.

    Falls back to an empty store on any read/parse error.
    """
    global _sample_fatigue
    try:
        if SAMPLE_FATIGUE_PATH.exists():
            with open(SAMPLE_FATIGUE_PATH, 'r', encoding='utf-8') as fh:
                _sample_fatigue = json.load(fh)
            total_usages = sum(
                data.get("uses", 0)
                for roles in _sample_fatigue.values()
                for data in roles.values()
            )
            logger.info(f"✓ Sample fatigue cargado: {len(_sample_fatigue)} samples, {total_usages} usos totales")
        else:
            _sample_fatigue = {}
            logger.info("Sample fatigue inicializado (vacío)")
    except Exception as exc:
        logger.warning(f"⚠ Error cargando sample fatigue: {exc}")
        _sample_fatigue = {}
    return _sample_fatigue
|
|
|
|
def _save_sample_fatigue() -> None:
    """T021: Persist the per-sample fatigue counters to disk (best-effort)."""
    try:
        SAMPLE_FATIGUE_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(SAMPLE_FATIGUE_PATH, 'w', encoding='utf-8') as fh:
            json.dump(_sample_fatigue, fh, indent=2)
        logger.debug(f"Sample fatigue guardado: {len(_sample_fatigue)} samples")
    except Exception as exc:
        logger.warning(f"⚠ Error guardando sample fatigue: {exc}")
|
|
|
|
def _update_sample_fatigue(sample_path: str, role: str) -> None:
    """T021: Increment the fatigue counter for the (sample, role) pair."""
    global _sample_fatigue

    stats = _sample_fatigue.setdefault(sample_path, {}).setdefault(
        role, {"uses": 0, "last_used": None}
    )
    stats["uses"] += 1
    stats["last_used"] = time.time()
|
|
|
|
def _get_fatigue_factor(sample_path: str, role: str) -> float:
    """
    T022: Continuous fatigue factor — a score multiplier from prior uses.

      0 uses    -> 1.0  (no penalty)
      1-3 uses  -> 0.75
      4-10 uses -> 0.50
      11+ uses  -> 0.20 (effectively blocked)
    """
    uses = _sample_fatigue.get(sample_path, {}).get(role, {}).get("uses", 0)

    if uses == 0:
        return 1.0
    elif 1 <= uses <= 3:
        return 0.75
    elif 4 <= uses <= 10:
        return 0.50
    else:  # 11+ (or anything outside the tiers above)
        return 0.20
|
|
|
|
def _reset_sample_fatigue(role: Optional[str] = None) -> Dict[str, Any]:
    """
    T023: Reset sample fatigue counters and persist the result.

    With role=None everything is wiped; otherwise only that role's entries
    are removed (samples left with no roles are pruned entirely).
    """
    global _sample_fatigue

    if role is None:
        total_samples = len(_sample_fatigue)
        _sample_fatigue = {}
        _save_sample_fatigue()
        logger.info(f"✓ Sample fatigue reseteada completamente ({total_samples} samples)")
        return {"reset": "all", "samples_cleared": total_samples}

    # Reset only the requested role.
    cleared_count = 0
    for sample_path in list(_sample_fatigue.keys()):
        if role in _sample_fatigue[sample_path]:
            del _sample_fatigue[sample_path][role]
            cleared_count += 1
            # Prune now-empty entries.
            if not _sample_fatigue[sample_path]:
                del _sample_fatigue[sample_path]
    _save_sample_fatigue()
    logger.info(f"✓ Sample fatigue reseteada para rol '{role}' ({cleared_count} entries)")
    return {"reset": role, "entries_cleared": cleared_count}
|
|
|
|
def _get_sample_fatigue_report() -> Dict[str, Any]:
    """
    T024: Build a sample-fatigue usage report.

    Returns:
        Dict with:
        - "total_samples": number of samples with fatigue data.
        - "by_role": per role, the top-10 most used samples
          (each entry: path / uses / last_used).
        - "most_used_overall": top-10 samples by total uses across all roles.
    """
    report: Dict[str, Any] = {
        "total_samples": len(_sample_fatigue),
        "by_role": {},
        "most_used_overall": []
    }

    # Aggregate per-sample totals and collect per-role usage in one pass.
    all_samples = []
    role_usage: Dict[str, List[Dict[str, Any]]] = {}
    for sample_path, roles in _sample_fatigue.items():
        total_uses = sum(data.get("uses", 0) for data in roles.values())
        last_used = max(
            (data.get("last_used", 0) for data in roles.values()),
            default=0
        )
        all_samples.append({
            "path": sample_path,
            "total_uses": total_uses,
            "last_used": last_used
        })
        for role_name, data in roles.items():
            role_usage.setdefault(role_name, []).append({
                "path": sample_path,
                "uses": data.get("uses", 0),
                "last_used": data.get("last_used", 0),
            })

    # Fix: "by_role" was previously declared (and documented) but never
    # populated. Fill it with the top-10 most used samples per role.
    for role_name, entries in role_usage.items():
        entries.sort(key=lambda x: x["uses"], reverse=True)
        report["by_role"][role_name] = entries[:10]

    # Top-10 overall by total uses across all roles.
    all_samples.sort(key=lambda x: x["total_uses"], reverse=True)
    report["most_used_overall"] = all_samples[:10]

    return report
|
|
# Volumes aligned with ROLE_GAIN_CALIBRATION hierarchy
# Kick/bass as anchors, supporting elements progressively lower
# Headroom preserved for bus and master processing
#
# Schema per track-name key:
#   "pan":    stereo position (negative = left, positive = right)
#   "volume": fader level (0..1 scale here; dB figures in the comments are
#             relative to the kick anchor — NOTE(review): confirm mapping)
#   "sends":  send-level per return track name (heat/glue/space/echo)
#   "fx_chain": insert devices with initial parameter values
AUDIO_LAYER_MIX_PROFILES = {
    # DRUMS - Anchor elements at top of hierarchy
    "AUDIO KICK": {
        "pan": 0.0,
        "volume": 0.85,  # Anchor: same as kick MIDI
        "sends": {"heat": 0.08, "glue": 0.08},
        "fx_chain": [
            {"device": "Saturator", "parameters": {"Drive": 1.5}},
        ],
    },
    "AUDIO CLAP": {
        "pan": 0.0,
        "volume": 0.78,  # -1.5 dB relative to kick
        "sends": {"space": 0.10, "echo": 0.04, "glue": 0.08},
        "fx_chain": [
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.06}},
        ],
    },
    "AUDIO HAT": {
        "pan": 0.12,
        "volume": 0.65,  # -4 dB relative to kick
        "sends": {"space": 0.04, "echo": 0.08, "glue": 0.04},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 12000.0, "Dry/Wet": 0.14}},
        ],
    },
    # BASS - Below drums
    "AUDIO BASS": {
        "pan": 0.0,
        "volume": 0.78,  # -1 dB relative to kick, same as bass MIDI
        "sends": {"heat": 0.10, "glue": 0.10},
        "fx_chain": [
            {"device": "Saturator", "parameters": {"Drive": 2.0}},
            {"device": "Auto Filter", "parameters": {"Frequency": 7800.0, "Dry/Wet": 0.08}},
        ],
    },
    "AUDIO BASS LOOP": {
        "pan": 0.0,
        "volume": 0.78,  # Same as bass
        "sends": {"heat": 0.12, "glue": 0.10},
        "fx_chain": [
            {"device": "Saturator", "parameters": {"Drive": 2.2}},
            {"device": "Auto Filter", "parameters": {"Frequency": 7600.0, "Dry/Wet": 0.10}},
        ],
    },
    # PERCUSSION - Secondary rhythmic elements
    "AUDIO PERC": {
        "pan": 0.10,
        "volume": 0.68,  # -3.5dB
        "sends": {"space": 0.08, "echo": 0.10, "glue": 0.06},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 9500.0, "Dry/Wet": 0.12}},
        ],
    },
    "AUDIO PERC MAIN": {
        "pan": 0.12,
        "volume": 0.68,  # -3.5dB
        "sends": {"space": 0.08, "echo": 0.10, "glue": 0.06},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 9800.0, "Dry/Wet": 0.12}},
        ],
    },
    "AUDIO PERC ALT": {
        "pan": -0.12,
        "volume": 0.62,  # -5dB, secondary perc
        "sends": {"space": 0.12, "echo": 0.14},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.10}},
        ],
    },
    "AUDIO TOP LOOP": {
        "pan": -0.18,
        "volume": 0.58,  # -5.5dB, supporting rhythmic layer
        "sends": {"space": 0.08, "echo": 0.16, "glue": 0.04},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 11200.0, "Dry/Wet": 0.16}},
            {"device": "Echo", "parameters": {"Dry/Wet": 0.06}},
        ],
    },
    # MUSIC - Harmony layers below rhythm
    "AUDIO SYNTH LOOP": {
        "pan": -0.08,
        "volume": 0.65,  # -4dB
        "sends": {"space": 0.12, "echo": 0.14, "glue": 0.04},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 10500.0, "Dry/Wet": 0.14}},
            {"device": "Echo", "parameters": {"Dry/Wet": 0.08}},
        ],
    },
    "AUDIO SYNTH PEAK": {
        "pan": 0.14,
        "volume": 0.68,  # -3.5dB, lead element
        "sends": {"space": 0.16, "echo": 0.16, "glue": 0.05},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 9800.0, "Dry/Wet": 0.16}},
            {"device": "Echo", "parameters": {"Dry/Wet": 0.12}},
        ],
    },
    # VOCAL - Present but under drums
    "AUDIO VOCAL": {
        "pan": 0.08,
        "volume": 0.68,  # -3dB
        "sends": {"space": 0.14, "echo": 0.18},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.12}},
        ],
    },
    "AUDIO VOCAL LOOP": {
        "pan": 0.08,
        "volume": 0.68,
        "sends": {"space": 0.14, "echo": 0.20},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.14}},
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.06}},
        ],
    },
    "AUDIO VOCAL BUILD": {
        "pan": -0.08,
        "volume": 0.65,  # Lower during build
        "sends": {"space": 0.18, "echo": 0.22},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.16}},
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.08}},
        ],
    },
    "AUDIO VOCAL PEAK": {
        "pan": 0.0,
        "volume": 0.70,  # Higher during peak
        "sends": {"space": 0.16, "echo": 0.18, "glue": 0.03},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.10}},
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.05}},
        ],
    },
    # FX - Deep in the mix
    "AUDIO CRASH FX": {
        "pan": 0.0,
        "volume": 0.50,  # -7dB, transient
        "sends": {"space": 0.22, "echo": 0.10, "glue": 0.03},
        "fx_chain": [
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.10}},
        ],
    },
    "AUDIO TRANSITION FILL": {
        "pan": -0.06,
        "volume": 0.55,  # -6dB
        "sends": {"space": 0.12, "echo": 0.14, "heat": 0.06},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 9200.0, "Dry/Wet": 0.12}},
            {"device": "Echo", "parameters": {"Dry/Wet": 0.06}},
        ],
    },
    "AUDIO SNARE ROLL": {
        "pan": 0.0,
        "volume": 0.60,  # -5dB, build tension
        "sends": {"space": 0.10, "echo": 0.20, "heat": 0.04},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 10800.0, "Dry/Wet": 0.14}},
            {"device": "Echo", "parameters": {"Dry/Wet": 0.10}},
        ],
    },
    "AUDIO ATMOS": {
        "pan": -0.12,
        "volume": 0.48,  # -8dB, background texture
        "sends": {"space": 0.28, "echo": 0.06, "glue": 0.02},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 7800.0, "Dry/Wet": 0.14}},
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.10}},
        ],
    },
    "AUDIO VOCAL SHOT": {
        "pan": 0.10,
        "volume": 0.62,  # -5dB
        "sends": {"space": 0.18, "echo": 0.22},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.14}},
            {"device": "Auto Filter", "parameters": {"Frequency": 9800.0, "Dry/Wet": 0.12}},
        ],
    },
    # RESAMPLE - Derived FX layers, deep in mix
    "AUDIO RESAMPLE REVERSE FX": {
        "volume": 0.48,  # -8dB, effect layer
        "pan": 0.0,
        "sends": {"space": 0.32, "echo": 0.18, "heat": 0.06},
        "fx_chain": [
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.18}},
            {"device": "Auto Filter", "parameters": {"Frequency": 9400.0, "Dry/Wet": 0.10}},
            {"device": "Saturator", "parameters": {"Drive": 1.4}},
        ],
    },
    "AUDIO RESAMPLE RISER": {
        "volume": 0.52,  # -7dB, builds up naturally
        "pan": 0.0,
        "sends": {"space": 0.36, "echo": 0.24, "heat": 0.08},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.18}},
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.14}},
            {"device": "Saturator", "parameters": {"Drive": 2.0}},
        ],
    },
    "AUDIO RESAMPLE DOWNLIFTER": {
        "volume": 0.45,  # -9dB, transitional
        "pan": -0.08,
        "sends": {"space": 0.28, "echo": 0.12},
        "fx_chain": [
            {"device": "Auto Filter", "parameters": {"Frequency": 8800.0, "Dry/Wet": 0.14}},
            {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.12}},
        ],
    },
    "AUDIO RESAMPLE STUTTER": {
        "volume": 0.50,  # -8dB
        "pan": 0.12,
        "sends": {"space": 0.18, "echo": 0.32, "glue": 0.04},
        "fx_chain": [
            {"device": "Echo", "parameters": {"Dry/Wet": 0.24}},
            {"device": "Auto Filter", "parameters": {"Frequency": 10600.0, "Dry/Wet": 0.10}},
            {"device": "Saturator", "parameters": {"Drive": 1.2}},
        ],
    },
}
|
|
|
|
# Remote-command names grouped by the kind of index argument they carry.
# NOTE(review): presumably consumed by the command dispatch/validation layer
# elsewhere in this module — confirm at the call sites.

# Commands addressed by track index.
TRACK_INDEX_COMMANDS = {
    "set_track_name",
    "set_track_color",
    "set_track_volume",
    "set_track_pan",
    "set_track_send",
    "set_track_mute",
    "set_track_solo",
    "set_track_arm",
    "delete_track",
}

# Commands addressed by clip slot (track + scene) including note editing.
CLIP_SCENE_COMMANDS = {
    "create_clip",
    "delete_clip",
    "duplicate_clip",
    "set_clip_name",
    "set_clip_color",
    "fire_clip",
    "stop_clip",
    "add_notes",
    "get_notes",
    "remove_notes",
    "set_notes",
    "quantize_notes",
}

# Commands addressed by scene index.
SCENE_INDEX_COMMANDS = {
    "create_scene",
    "delete_scene",
    "fire_scene",
    "set_scene_name",
    "set_scene_color",
}
|
|
|
|
# Song structure presets: each preset is an ordered list of section tuples.
# Tuple shape is (section name, length, level) — the second field looks like
# a length in bars (8/16) and the third like an energy/intensity value
# (8..32). NOTE(review): confirm the third field's semantics against the
# section scheduler that consumes these presets.
SONG_STRUCTURE_PRESETS = {
    "minimal": [
        ("INTRO", 8, 12),
        ("GROOVE", 16, 20),
        ("BREAK", 8, 25),
        ("OUTRO", 8, 8),
    ],
    "standard": [
        ("INTRO", 8, 12),
        ("BUILD", 8, 18),
        ("DROP A", 16, 28),
        ("BREAK", 8, 25),
        ("DROP B", 16, 30),
        ("OUTRO", 8, 8),
    ],
    "extended": [
        ("INTRO DJ", 16, 10),
        ("BUILD A", 8, 18),
        ("DROP A", 16, 28),
        ("BREAKDOWN", 8, 25),
        ("BUILD B", 8, 18),
        ("DROP B", 16, 30),
        ("OUTRO DJ", 16, 8),
    ],
    "club": [
        ("INTRO DJ", 16, 10),
        ("GROOVE A", 16, 14),
        ("VOCAL BUILD", 8, 18),
        ("DROP A", 16, 28),
        ("BREAKDOWN", 8, 25),
        ("BUILD B", 8, 18),
        ("DROP B", 16, 30),
        ("PEAK", 8, 32),
        ("OUTRO DJ", 16, 8),
    ],
}
|
|
|
|
# Mix profiles per genre.
#
# Schema per genre key:
#   "bus_config":          per-bus gain (dB), pan, and Live color index
#   "returns":             return-track device type, gain (dB), dry/wet
#   "device_chains":       per-bus insert devices with initial parameters
#   "automation_defaults": per-section multipliers for filter cutoff and
#                          reverb/delay wet amounts
#   "loudness_target":     mastering targets (integrated LUFS, true peak dB,
#                          loudness range)
MIX_PROFILES = {
    "tech-house": {
        "bus_config": {
            "drums": {"gain_db": 0.0, "pan": 0.0, "color": 10},
            "bass": {"gain_db": -0.5, "pan": 0.0, "color": 30},
            "music": {"gain_db": -2.0, "pan": 0.0, "color": 45},
            "vocal": {"gain_db": -3.0, "pan": 0.0, "color": 60},
            "fx": {"gain_db": -4.0, "pan": 0.0, "color": 75},
        },
        "returns": {
            "heat": {"type": "Saturator", "gain_db": 0.0, "dry_wet": 1.0},
            "glue": {"type": "Glue Compressor", "gain_db": 0.0, "dry_wet": 0.3},
            "space": {"type": "Hybrid Reverb", "gain_db": -3.0, "dry_wet": 0.5},
            "echo": {"type": "Echo", "gain_db": -6.0, "dry_wet": 0.4},
        },
        "device_chains": {
            "drums": [
                {"device": "Drum Buss", "parameters": {"Drive": 2.5, "Comp": 0.4}},
                {"device": "Saturator", "parameters": {"Drive": 2.0, "Dry/Wet": 0.15}},
            ],
            "bass": [
                {"device": "Saturator", "parameters": {"Drive": 3.0, "Dry/Wet": 0.2}},
                {"device": "Auto Filter", "parameters": {"Frequency": 120.0, "Resonance": 0.3}},
            ],
            "music": [
                {"device": "Auto Filter", "parameters": {"Frequency": 8000.0, "Dry/Wet": 0.1}},
                {"device": "Echo", "parameters": {"Dry/Wet": 0.12}},
            ],
            "vocal": [
                {"device": "Echo", "parameters": {"Dry/Wet": 0.18}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.1}},
            ],
            "fx": [
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.25}},
            ],
        },
        "automation_defaults": {
            "intro": {"filter_cutoff_mult": 0.6, "reverb_wet_mult": 1.2, "delay_wet_mult": 0.8},
            "build": {"filter_cutoff_mult": 1.0, "reverb_wet_mult": 1.4, "delay_wet_mult": 1.2},
            "drop": {"filter_cutoff_mult": 1.0, "reverb_wet_mult": 0.6, "delay_wet_mult": 0.5},
            "break": {"filter_cutoff_mult": 0.5, "reverb_wet_mult": 1.5, "delay_wet_mult": 1.0},
            "outro": {"filter_cutoff_mult": 0.7, "reverb_wet_mult": 1.3, "delay_wet_mult": 1.1},
        },
        "loudness_target": {
            "integrated_lufs": -8.0,
            "true_peak_db": -1.0,
            "lra": 6.0,
        },
    },
    "house": {
        "bus_config": {
            "drums": {"gain_db": 0.0, "pan": 0.0, "color": 10},
            "bass": {"gain_db": 0.0, "pan": 0.0, "color": 30},
            "music": {"gain_db": -1.5, "pan": 0.0, "color": 45},
            "vocal": {"gain_db": -2.0, "pan": 0.0, "color": 60},
            "fx": {"gain_db": -3.5, "pan": 0.0, "color": 75},
        },
        "returns": {
            "heat": {"type": "Saturator", "gain_db": 0.0, "dry_wet": 1.0},
            "glue": {"type": "Glue Compressor", "gain_db": 0.0, "dry_wet": 0.25},
            "space": {"type": "Hybrid Reverb", "gain_db": -2.0, "dry_wet": 0.45},
            "echo": {"type": "Echo", "gain_db": -5.0, "dry_wet": 0.35},
        },
        "device_chains": {
            "drums": [
                {"device": "Drum Buss", "parameters": {"Drive": 2.0, "Comp": 0.35}},
            ],
            "bass": [
                {"device": "Saturator", "parameters": {"Drive": 2.5, "Dry/Wet": 0.18}},
            ],
            "music": [
                {"device": "Auto Filter", "parameters": {"Frequency": 9000.0, "Dry/Wet": 0.12}},
                {"device": "Echo", "parameters": {"Dry/Wet": 0.15}},
            ],
            "vocal": [
                {"device": "Echo", "parameters": {"Dry/Wet": 0.2}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.15}},
            ],
            "fx": [
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.3}},
            ],
        },
        "automation_defaults": {
            "intro": {"filter_cutoff_mult": 0.65, "reverb_wet_mult": 1.1, "delay_wet_mult": 0.9},
            "build": {"filter_cutoff_mult": 0.95, "reverb_wet_mult": 1.3, "delay_wet_mult": 1.1},
            "drop": {"filter_cutoff_mult": 1.0, "reverb_wet_mult": 0.7, "delay_wet_mult": 0.6},
            "break": {"filter_cutoff_mult": 0.55, "reverb_wet_mult": 1.4, "delay_wet_mult": 0.9},
            "outro": {"filter_cutoff_mult": 0.75, "reverb_wet_mult": 1.2, "delay_wet_mult": 1.0},
        },
        "loudness_target": {
            "integrated_lufs": -7.0,
            "true_peak_db": -0.5,
            "lra": 5.5,
        },
    },
    "techno": {
        "bus_config": {
            "drums": {"gain_db": 0.5, "pan": 0.0, "color": 10},
            "bass": {"gain_db": -0.5, "pan": 0.0, "color": 30},
            "music": {"gain_db": -2.5, "pan": 0.0, "color": 45},
            "vocal": {"gain_db": -4.0, "pan": 0.0, "color": 60},
            "fx": {"gain_db": -3.0, "pan": 0.0, "color": 75},
        },
        "returns": {
            "heat": {"type": "Saturator", "gain_db": 1.0, "dry_wet": 1.0},
            "glue": {"type": "Glue Compressor", "gain_db": 0.0, "dry_wet": 0.4},
            "space": {"type": "Hybrid Reverb", "gain_db": -4.0, "dry_wet": 0.55},
            "echo": {"type": "Echo", "gain_db": -8.0, "dry_wet": 0.45},
        },
        "device_chains": {
            "drums": [
                {"device": "Drum Buss", "parameters": {"Drive": 3.5, "Comp": 0.5}},
                {"device": "Saturator", "parameters": {"Drive": 3.0, "Dry/Wet": 0.2}},
            ],
            "bass": [
                {"device": "Saturator", "parameters": {"Drive": 4.0, "Dry/Wet": 0.25}},
                {"device": "Auto Filter", "parameters": {"Frequency": 150.0, "Resonance": 0.4}},
            ],
            "music": [
                {"device": "Auto Filter", "parameters": {"Frequency": 7000.0, "Dry/Wet": 0.15}},
                {"device": "Echo", "parameters": {"Dry/Wet": 0.2, "Feedback": 0.5}},
            ],
            "vocal": [
                {"device": "Echo", "parameters": {"Dry/Wet": 0.25, "Feedback": 0.4}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.12}},
            ],
            "fx": [
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.35}},
                {"device": "Saturator", "parameters": {"Drive": 2.0, "Dry/Wet": 0.15}},
            ],
        },
        "automation_defaults": {
            "intro": {"filter_cutoff_mult": 0.5, "reverb_wet_mult": 1.3, "delay_wet_mult": 1.0},
            "build": {"filter_cutoff_mult": 0.9, "reverb_wet_mult": 1.5, "delay_wet_mult": 1.3},
            "drop": {"filter_cutoff_mult": 1.0, "reverb_wet_mult": 0.5, "delay_wet_mult": 0.4},
            "break": {"filter_cutoff_mult": 0.4, "reverb_wet_mult": 1.6, "delay_wet_mult": 1.2},
            "outro": {"filter_cutoff_mult": 0.6, "reverb_wet_mult": 1.4, "delay_wet_mult": 1.1},
        },
        "loudness_target": {
            "integrated_lufs": -9.0,
            "true_peak_db": -1.5,
            "lra": 7.0,
        },
    },
    "progressive": {
        "bus_config": {
            "drums": {"gain_db": -0.5, "pan": 0.0, "color": 10},
            "bass": {"gain_db": -1.0, "pan": 0.0, "color": 30},
            "music": {"gain_db": -1.0, "pan": 0.0, "color": 45},
            "vocal": {"gain_db": -1.5, "pan": 0.0, "color": 60},
            "fx": {"gain_db": -2.5, "pan": 0.0, "color": 75},
        },
        "returns": {
            "heat": {"type": "Saturator", "gain_db": -1.0, "dry_wet": 1.0},
            "glue": {"type": "Glue Compressor", "gain_db": 0.0, "dry_wet": 0.2},
            "space": {"type": "Hybrid Reverb", "gain_db": -1.0, "dry_wet": 0.6},
            "echo": {"type": "Echo", "gain_db": -4.0, "dry_wet": 0.5},
        },
        "device_chains": {
            "drums": [
                {"device": "Drum Buss", "parameters": {"Drive": 1.5, "Comp": 0.25}},
            ],
            "bass": [
                {"device": "Saturator", "parameters": {"Drive": 2.0, "Dry/Wet": 0.12}},
                {"device": "Auto Filter", "parameters": {"Frequency": 100.0, "Resonance": 0.25}},
            ],
            "music": [
                {"device": "Auto Filter", "parameters": {"Frequency": 10000.0, "Dry/Wet": 0.08}},
                {"device": "Echo", "parameters": {"Dry/Wet": 0.18, "Feedback": 0.6}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.15}},
            ],
            "vocal": [
                {"device": "Echo", "parameters": {"Dry/Wet": 0.22, "Feedback": 0.5}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.2}},
            ],
            "fx": [
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.4}},
            ],
        },
        "automation_defaults": {
            "intro": {"filter_cutoff_mult": 0.7, "reverb_wet_mult": 1.0, "delay_wet_mult": 1.0},
            "build": {"filter_cutoff_mult": 0.85, "reverb_wet_mult": 1.2, "delay_wet_mult": 1.15},
            "drop": {"filter_cutoff_mult": 1.0, "reverb_wet_mult": 0.8, "delay_wet_mult": 0.7},
            "break": {"filter_cutoff_mult": 0.6, "reverb_wet_mult": 1.3, "delay_wet_mult": 0.95},
            "outro": {"filter_cutoff_mult": 0.8, "reverb_wet_mult": 1.1, "delay_wet_mult": 1.05},
        },
        "loudness_target": {
            "integrated_lufs": -6.0,
            "true_peak_db": -0.3,
            "lra": 5.0,
        },
    },
    "melodic-techno": {
        "bus_config": {
            "drums": {"gain_db": 0.0, "pan": 0.0, "color": 10},
            "bass": {"gain_db": -0.5, "pan": 0.0, "color": 30},
            "music": {"gain_db": -1.5, "pan": 0.0, "color": 45},
            "vocal": {"gain_db": -2.5, "pan": 0.0, "color": 60},
            "fx": {"gain_db": -3.0, "pan": 0.0, "color": 75},
        },
        "returns": {
            "heat": {"type": "Saturator", "gain_db": 0.5, "dry_wet": 1.0},
            "glue": {"type": "Glue Compressor", "gain_db": 0.0, "dry_wet": 0.35},
            "space": {"type": "Hybrid Reverb", "gain_db": -2.5, "dry_wet": 0.55},
            "echo": {"type": "Echo", "gain_db": -6.0, "dry_wet": 0.45},
        },
        "device_chains": {
            "drums": [
                {"device": "Drum Buss", "parameters": {"Drive": 2.8, "Comp": 0.45}},
                {"device": "Saturator", "parameters": {"Drive": 2.5, "Dry/Wet": 0.18}},
            ],
            "bass": [
                {"device": "Saturator", "parameters": {"Drive": 3.5, "Dry/Wet": 0.22}},
                {"device": "Auto Filter", "parameters": {"Frequency": 130.0, "Resonance": 0.35}},
            ],
            "music": [
                {"device": "Auto Filter", "parameters": {"Frequency": 7500.0, "Dry/Wet": 0.12}},
                {"device": "Echo", "parameters": {"Dry/Wet": 0.16, "Feedback": 0.55}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.1}},
            ],
            "vocal": [
                {"device": "Echo", "parameters": {"Dry/Wet": 0.22, "Feedback": 0.45}},
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.15}},
            ],
            "fx": [
                {"device": "Hybrid Reverb", "parameters": {"Dry/Wet": 0.38}},
                {"device": "Saturator", "parameters": {"Drive": 1.5, "Dry/Wet": 0.1}},
            ],
        },
        "automation_defaults": {
            "intro": {"filter_cutoff_mult": 0.55, "reverb_wet_mult": 1.2, "delay_wet_mult": 1.0},
            "build": {"filter_cutoff_mult": 0.9, "reverb_wet_mult": 1.35, "delay_wet_mult": 1.2},
            "drop": {"filter_cutoff_mult": 1.0, "reverb_wet_mult": 0.55, "delay_wet_mult": 0.5},
            "break": {"filter_cutoff_mult": 0.45, "reverb_wet_mult": 1.5, "delay_wet_mult": 1.1},
            "outro": {"filter_cutoff_mult": 0.65, "reverb_wet_mult": 1.3, "delay_wet_mult": 1.05},
        },
        "loudness_target": {
            "integrated_lufs": -7.5,
            "true_peak_db": -0.8,
            "lra": 6.0,
        },
    },
}
|
|
|
|
|
|
def _windows_short_path(path: Union[str, Path]) -> str:
|
|
"""Convierte una ruta a su forma corta de Windows para evitar espacios en mensajes UDP."""
|
|
normalized = str(path)
|
|
if os.name != "nt":
|
|
return normalized
|
|
|
|
get_short_path = getattr(ctypes.windll.kernel32, "GetShortPathNameW", None)
|
|
if get_short_path is None:
|
|
return normalized
|
|
|
|
output_buffer_size = 4096
|
|
output_buffer = ctypes.create_unicode_buffer(output_buffer_size)
|
|
result = get_short_path(normalized, output_buffer, output_buffer_size)
|
|
if result == 0:
|
|
return normalized
|
|
return output_buffer.value or normalized
|
|
|
|
|
|
def _udp_safe_path(path: Union[str, Path]) -> str:
    """Normalize a path for simple UDP messages sent to Max for Live."""
    short_form = _windows_short_path(path)
    return short_form.replace("\\", "/")
|
|
|
|
|
|
# ============================================================================
# SECTION VARIATION - Feature 3.3
# ============================================================================

# Roles that are allowed to vary per section; roles outside this set keep
# one sample for the whole song.
SECTION_VARIATION_ROLES = {
    'kick', 'clap', 'hat', 'perc', 'ride', 'top_loop',
    'sub_bass', 'bass',
    'chords', 'pad', 'pluck', 'arp', 'lead', 'counter',
    'vocal', 'vocal_chop',
}
|
|
|
|
|
|
def _apply_section_variation_to_plan(plan: Dict[str, Any],
                                     sections: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Apply per-section variation to the reference plan.

    For every eligible role, annotate its layer with the variant to use in
    each section, keyed by the section's start position under the layer's
    'section_variants' dict.
    """
    varied_plan = plan.copy()
    layers = plan.get('layers', [])

    for section in sections:
        kind = section.get('kind', 'unknown')
        name = section.get('name', '')
        start = section.get('start', 0)

        for layer in layers:
            role = layer.get('role', '')
            if role not in SECTION_VARIATION_ROLES:
                continue

            variant = _get_section_variant_for_role(role, kind, name)
            if variant == 'standard':
                continue

            # Tag the layer so downstream sample selection can swap
            # material per section.
            layer.setdefault('section_variants', {})[start] = {
                'variant': variant,
                'section_kind': kind,
                'section_name': name
            }

            logger.debug("SECTION_VARIATION: role '%s' will use variant '%s' in section '%s' (start=%.1f)",
                         role, variant, name, start)

    varied_plan['layers'] = layers
    return varied_plan
|
|
|
|
|
|
def _get_section_variant_for_role(role: str, section_kind: str, section_name: str) -> str:
|
|
"""Helper para obtener variante de sección para un rol."""
|
|
# Mapeo simple de sección a variante
|
|
kind_lower = section_kind.lower()
|
|
name_lower = section_name.lower()
|
|
|
|
# Detectar por nombre
|
|
if 'minimal' in name_lower or 'atmos' in name_lower:
|
|
return 'minimal'
|
|
if 'peak' in name_lower or 'main' in name_lower:
|
|
return 'full'
|
|
|
|
# Defaults por tipo
|
|
section_variants = {
|
|
'intro': 'sparse',
|
|
'verse': 'standard',
|
|
'build': 'building',
|
|
'drop': 'full',
|
|
'break': 'sparse',
|
|
'outro': 'fading'
|
|
}
|
|
|
|
return section_variants.get(kind_lower, 'standard')
|
|
|
|
|
|
def _filter_samples_by_variant(samples: List, variant: str) -> List:
|
|
"""Filtra samples según variante de sección."""
|
|
if variant == 'standard' or not samples:
|
|
return samples
|
|
|
|
filtered = []
|
|
for sample in samples:
|
|
name_lower = getattr(sample, 'name', '').lower()
|
|
|
|
# Variant sparse: buscar keywords sutiles
|
|
if variant == 'sparse' or variant == 'minimal':
|
|
if any(kw in name_lower for kw in ['light', 'soft', 'subtle', 'simple', 'minimal']):
|
|
filtered.insert(0, sample)
|
|
elif any(kw in name_lower for kw in ['heavy', 'full', 'busy', 'big']):
|
|
continue
|
|
else:
|
|
filtered.append(sample)
|
|
|
|
# Variant full: buscar keywords ricos
|
|
elif variant in ['full', 'peak', 'building']:
|
|
if any(kw in name_lower for kw in ['full', 'big', 'rich', 'heavy', 'peak']):
|
|
filtered.insert(0, sample)
|
|
elif any(kw in name_lower for kw in ['minimal', 'subtle']):
|
|
continue
|
|
else:
|
|
filtered.append(sample)
|
|
|
|
else:
|
|
filtered.append(sample)
|
|
|
|
return filtered if filtered else samples
|
|
|
|
|
|
# ============================================================================
# M4L DEVICE MANAGEMENT - Hardened Loading with Fallback
# ============================================================================

M4L_LOAD_TIMEOUT = 5.0  # seconds to wait for device load
M4L_UDP_TIMEOUT = 2.0  # seconds for UDP command timeout (see send_m4l_sampler_command)
|
|
|
|
|
|
def verify_m4l_device_files_exist() -> Dict[str, Any]:
    """
    Check that the M4L device files are present on disk.

    Returns a dict with the status of each file plus whether the M4L
    system is usable (only the sampler device is a hard requirement).
    """
    sampler_exists = PROJECT_M4L_SAMPLER_DEVICE.exists() if PROJECT_M4L_SAMPLER_DEVICE else False
    status: Dict[str, Any] = {
        "sampler_exists": sampler_exists,
        "sampler_path": str(PROJECT_M4L_SAMPLER_DEVICE) if PROJECT_M4L_SAMPLER_DEVICE else None,
        "engine_exists": False,
        "engine_path": None,
        "usable": False,
        "missing": [],
    }

    if not sampler_exists:
        status["missing"].append("AbletonMCP_SamplerPro.amxd")

    engine_path = PROJECT_M4L_DIR / "AbletonMCP_Engine.amxd" if PROJECT_M4L_DIR else None
    if engine_path:
        status["engine_exists"] = engine_path.exists()
        status["engine_path"] = str(engine_path)
        if not status["engine_exists"]:
            status["missing"].append("AbletonMCP_Engine.amxd")

    # The engine device is optional; only the sampler gates usability.
    status["usable"] = sampler_exists
    return status
|
|
|
|
|
|
def ensure_m4l_sampler_device_installed() -> Optional[Path]:
    """
    Copy the M4L device to locations that Live indexes as audio effects.

    Returns:
        The installed path, or None on failure (instead of raising).
    """
    try:
        if not PROJECT_M4L_SAMPLER_DEVICE.exists():
            logger.warning(f"Device M4L no encontrado: {PROJECT_M4L_SAMPLER_DEVICE}")
            return None

        install_targets = [
            INSTALLED_M4L_SAMPLER_DEVICE,
            FACTORY_M4L_SAMPLER_DEVICE,
        ]

        installed_path: Optional[Path] = None
        for target in install_targets:
            try:
                target.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(PROJECT_M4L_SAMPLER_DEVICE, target)
                # Report the first location that succeeded.
                if installed_path is None:
                    installed_path = target
                logger.debug(f"Device M4L copiado a: {target}")
            except PermissionError as pe:
                logger.debug(f"Sin permisos para copiar a {target}: {pe}")
            except OSError as ose:
                logger.debug(f"Error copiando a {target}: {ose}")

        if installed_path is not None:
            return installed_path
        # Fix: previously this returned INSTALLED_M4L_SAMPLER_DEVICE even when
        # every copy failed, contradicting the documented None-on-failure
        # contract. Fall back to it only if a previous install actually left
        # the file on disk.
        if INSTALLED_M4L_SAMPLER_DEVICE.exists():
            return INSTALLED_M4L_SAMPLER_DEVICE
        return None

    except Exception as e:
        logger.error(f"Error instalando device M4L: {e}")
        return None
|
|
|
|
|
|
def send_m4l_sampler_command(command: str, *parts: Union[str, int, float]) -> bool:
    """
    Send a simple UDP command to the SamplerPro device.

    None and empty-string parts are skipped. Returns True when the
    datagram was handed to the socket, False on any failure.
    """
    try:
        tokens = [str(command)]
        tokens.extend(str(part) for part in parts if part not in (None, ""))
        datagram = " ".join(tokens).encode("utf-8")

        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.settimeout(M4L_UDP_TIMEOUT)
        try:
            sock.sendto(datagram, (HOST, M4L_SAMPLER_PORT))
            return True
        except socket.timeout:
            logger.debug(f"Timeout enviando comando M4L: {command}")
            return False
        except OSError as ose:
            logger.debug(f"Error de socket enviando comando M4L: {ose}")
            return False
        finally:
            sock.close()
    except Exception as e:
        logger.debug(f"Error enviando comando M4L '{command}': {e}")
        return False
|
|
|
|
|
|
def try_load_m4l_device_on_track(
    ableton,
    track_index: int,
    device_name: str = M4L_DEVICE_NAME,
    verify_load: bool = True
) -> Dict[str, Any]:
    """
    Try to load an M4L device on a track, with optional verification.

    Args:
        ableton: connection object exposing send_command(cmd, params).
        track_index: target track in the Live set.
        device_name: device to load (defaults to M4L_DEVICE_NAME).
        verify_load: when True, query the track afterwards to confirm
            the device actually appeared.

    Returns:
        Dict with keys: success, device_name, error, verified.
    """
    result = {
        "success": False,
        "device_name": device_name,
        "error": None,
        "verified": False,
    }

    # Bail out early if the .amxd files are missing on disk.
    verify_result = verify_m4l_device_files_exist()
    if not verify_result["usable"]:
        result["error"] = f"Archivo M4L no encontrado: {', '.join(verify_result['missing'])}"
        return result

    # Make sure Live can find the device in its indexed locations.
    installed_path = ensure_m4l_sampler_device_installed()
    if installed_path is None:
        result["error"] = "No se pudo instalar el device M4L en User Library"
        return result

    try:
        load_response = ableton.send_command("load_device", {
            "track_index": track_index,
            "device_name": device_name,
        })

        if _is_error_response(load_response):
            result["error"] = f"Error cargando device: {load_response.get('message')}"
            return result

        result["success"] = True

        if verify_load:
            # Give Live a moment to register the device before querying.
            time.sleep(0.5)
            try:
                info_response = ableton.send_command("get_track_info", {
                    "track_index": track_index
                })
                if info_response.get("status") == "success":
                    devices = info_response.get("result", {}).get("devices", [])
                    device_names = [d.get("name", "").lower() for d in devices]
                    # Substring match: Live may decorate the device name.
                    if any(device_name.lower() in name for name in device_names):
                        result["verified"] = True
                    else:
                        logger.debug(f"Device {device_name} no encontrado en track. Devices: {device_names}")
            except Exception as ve:
                # Verification is best-effort; a failed query simply leaves
                # verified=False while success stays True.
                logger.debug(f"No se pudo verificar carga del device: {ve}")

        return result

    except Exception as e:
        result["error"] = f"Excepcion cargando device M4L: {e}"
        return result
|
|
|
|
def _select_hybrid_sample_paths(genre: str, key: str = "", bpm: float = 0) -> Dict[str, str]:
    """Pick concrete sample paths for the hybrid M4L device.

    Raises:
        RuntimeError: when the sample selector is unavailable, or when any
            required role (kick/snare/hat/bass) has no matching sample.
    """
    selector = get_sample_selector()
    if not selector:
        raise RuntimeError("Selector de samples no disponible")

    group = selector.select_for_genre(genre, key or None, bpm if bpm > 0 else None)
    kit = group.drums

    def _first_path(*candidates) -> str:
        # Path of the first truthy candidate sample, else empty string.
        for candidate in candidates:
            if candidate:
                return candidate.path
        return ""

    sample_paths = {
        "kick": _first_path(kit.kick) if kit else "",
        "snare": _first_path(kit.snare, kit.clap) if kit else "",
        "hat": _first_path(kit.hat_closed, kit.hat_open) if kit else "",
        "bass": group.bass[0].path if group.bass else "",
    }

    missing = [name for name, value in sample_paths.items() if not value]
    if missing:
        raise RuntimeError(f"Faltan samples para el modo híbrido: {', '.join(missing)}")

    return sample_paths
|
|
|
|
|
|
def _find_library_file(*patterns: str, rng: Optional[random.Random] = None, session_seed: Optional[int] = None, section: Optional[str] = None) -> str:
    """Locate one library file matching the given glob patterns.

    First tries a semantic search through VectorManager; if that import or
    lookup fails, falls back to glob matching with per-folder bucket sampling.

    Args:
        *patterns: Glob patterns to match (e.g. "*Kick*.wav").
        rng: Optional random generator used when no session seed is given.
        session_seed: Seed for reproducible shuffling (T012).
        section: Current section (intro/drop/break) for variants (T036 Section
            Casting). NOTE(review): currently unused in this implementation.

    Returns:
        Path of the selected file as a string, or "" when nothing matches.
    """
    library_dir = Path(SAMPLES_DIR)
    if not library_dir.exists():
        return ""

    # T012: a session seed takes precedence so repeated calls within one
    # generation session shuffle deterministically.
    if session_seed is not None:
        local_rng = random.Random(session_seed)
    else:
        local_rng = rng or random

    # Filename fragments that indicate full songs rather than usable samples.
    FULL_SONG_INDICATORS = [
        "extended mix", "original mix", "radio edit", "club mix", "remix",
        "feat.", "ft.", "pres.", " vs ", " - ",  # artist naming with dashes
    ]

    def is_likely_full_song(filepath: str) -> bool:
        """Heuristic: long names or song-style keywords mean a full track."""
        name_lower = Path(filepath).name.lower()
        # Names longer than 50 chars are usually complete songs.
        if len(name_lower) > 50:
            return True
        # Keyword-based exclusion.
        for indicator in FULL_SONG_INDICATORS:
            if indicator in name_lower:
                return True
        return False

    # First attempt: semantic search via VectorManager.
    try:
        from vector_manager import VectorManager
        vm = VectorManager(str(library_dir))

        # Strip glob syntax so the patterns become a semantic prompt.
        query = " ".join([p.replace('*', '').replace('.wav', '').strip() for p in patterns])
        if query:
            # T011: limit raised from 10 to 50 for more diversity.
            results = vm.semantic_search(query, limit=50)
            if results:
                # Drop recently used paths AND likely full songs.
                valid_results = [
                    r for r in results
                    if r['path'].lower() not in _RECENT_LIBRARY_MATCHES
                    and not is_likely_full_song(r['path'])
                ]
                pool = valid_results or results
                if pool:
                    # T012: shuffle the pool with the session seed for diversity.
                    shuffled_pool = pool[:]
                    local_rng.shuffle(shuffled_pool)
                    selected = shuffled_pool[local_rng.randrange(len(shuffled_pool))]['path']
                    _RECENT_LIBRARY_MATCHES.append(selected.lower())
                    return selected
    except Exception as e:
        import logging
        logging.getLogger("server").warning(f"Semantic search failed: {e}. Falling back to glob.")

    # T013: bucket sampling per subfolder (max 15 files per subfolder).
    # Original glob fallback, now with bucket sampling.
    matches_by_folder: Dict[str, List[Path]] = defaultdict(list)
    seen = set()
    for pattern in patterns:
        for match in sorted(library_dir.glob(pattern)):
            if not match.is_file():
                continue
            key = str(match.resolve()).lower()
            if key in seen:
                continue
            # Skip complete songs.
            if is_likely_full_song(str(match)):
                continue
            seen.add(key)
            # Group by parent folder for bucket sampling.
            folder = str(match.parent)
            matches_by_folder[folder].append(match)

    # T013: cap each subfolder at 15 files.
    MAX_FILES_PER_FOLDER = 15
    matches: List[Path] = []
    for folder, files in matches_by_folder.items():
        # Shuffle with the session seed for diversity (T012).
        shuffled_files = files[:]
        local_rng.shuffle(shuffled_files)
        # Keep at most 15 per folder.
        selected_files = shuffled_files[:MAX_FILES_PER_FOLDER]
        matches.extend(selected_files)
        logger.debug(f"Bucket sampling: {folder} -> {len(selected_files)}/{len(files)} files")

    if not matches:
        return ""

    # Prefer files not chosen recently; fall back to the full match list.
    prioritized = [match for match in matches if str(match.resolve()).lower() not in _RECENT_LIBRARY_MATCHES]
    pool = prioritized or matches
    # T012: final shuffle with the session seed.
    shuffled_pool = pool[:]
    local_rng.shuffle(shuffled_pool)
    selected = shuffled_pool[local_rng.randrange(len(shuffled_pool))]
    _RECENT_LIBRARY_MATCHES.append(str(selected.resolve()).lower())
    return str(selected)
|
|
|
|
|
|
def _build_audio_fallback_sample_paths(genre: str, key: str = "", bpm: float = 0) -> Dict[str, str]:
    """Collect the sample paths needed for the direct-audio fallback.

    Starts from the hybrid drum/bass selection and then resolves one library
    file per auxiliary role (percussion, vocals, FX, atmospheres), recording
    usage (T014) and fatigue (T021) for every hit.

    Args:
        genre: Target genre used for sample selection.
        key: Optional musical key.
        bpm: Optional tempo; 0 means "unspecified".

    Returns:
        Mapping of role name -> sample path ("" when no file was found).
    """
    variant_seed = None
    try:
        generator = get_song_generator()
        current_profile = getattr(generator, "_current_generation_profile", {}) or {}
        variant_seed = current_profile.get("seed")
    except Exception:
        variant_seed = None
    rng = random.Random(int(variant_seed)) if variant_seed is not None else random.Random()

    sample_paths = _select_hybrid_sample_paths(genre, key, bpm)

    # T012: seed from the generation profile keeps selection reproducible;
    # wall-clock fallback still diversifies repeated calls.
    # Fix: use `is not None` so a legitimate seed of 0 is honored (the previous
    # truthy check silently discarded it, inconsistent with `rng` above).
    session_seed = int(variant_seed) if variant_seed is not None else int(time.time())

    # T014: record usage history for every selected sample.
    # T021: record sample fatigue.
    def find_and_track(patterns, role) -> str:
        """Resolve one library file for *role*, tracking usage and fatigue."""
        path = _find_library_file(*patterns, rng=rng, session_seed=session_seed)
        if path:
            _update_sample_usage(path, role)
            _update_sample_fatigue(path, role)  # T021: register fatigue
        # Fix: always return the (possibly empty) path instead of falling
        # through to an implicit None, keeping the Dict[str, str] contract.
        return path

    sample_paths["perc_loop"] = find_and_track(("*Percussion Loop*.wav", "*Perc Loop*.wav"), "perc_loop")
    sample_paths["vocal_loop"] = find_and_track(("*Vocal Loop*.wav", "*Vox*.wav"), "vocal_loop")
    sample_paths["perc_alt"] = find_and_track(("*Percussion Loop*.wav", "*Perc Loop*.wav", "*Drum Loop*Perc*.wav"), "perc_alt")
    sample_paths["top_loop"] = find_and_track(("*Top Loop*.wav", "*Drum Loop*Full*.wav", "*Full Mix*.wav"), "top_loop")
    sample_paths["synth_loop"] = find_and_track(("*Synth_Loop*.wav", "*Synth Loop*.wav", "*Music Loop*.wav"), "synth_loop")
    sample_paths["synth_peak"] = find_and_track(("*Lead Loop*.wav", "*Synth_Loop*.wav", "*Hook*.wav"), "synth_peak")
    sample_paths["vocal_build"] = find_and_track(("*Vocal Loop*.wav", "*Vox*.wav", "*Chant*.wav"), "vocal_build")
    sample_paths["vocal_peak"] = find_and_track(("*Vocal Loop*.wav", "*Vox*.wav", "*Hook Vocal*.wav"), "vocal_peak")
    sample_paths["crash_fx"] = find_and_track(("*Crash*.wav", "*Impact*.wav"), "crash_fx")
    sample_paths["fill_fx"] = find_and_track(("*Fill*.wav", "*Transition*.wav"), "fill_fx")
    sample_paths["snare_roll"] = find_and_track(("*Snareroll*.wav", "*Snare Roll*.wav"), "snare_roll")
    sample_paths["atmos_fx"] = find_and_track(("*Atmos*.wav", "*Drone*.wav", "*Texture*.wav", "*Ambience*.wav"), "atmos_fx")
    sample_paths["vocal_shot"] = find_and_track(("*Vocal One Shot*.wav", "*Vox One Shot*.wav", "*Vocal Shot*.wav"), "vocal_shot")

    # T014: persist the history after all selections are done.
    _save_sample_history()

    return sample_paths
|
|
|
|
|
|
def _iter_audio_fallback_sections(total_beats: int, config: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
|
|
sections = list((config or {}).get("sections", []) or [])
|
|
timeline: List[Dict[str, Any]] = []
|
|
cursor = 0.0
|
|
|
|
for index, section in enumerate(sections):
|
|
if not isinstance(section, dict):
|
|
continue
|
|
beats = float(section.get("beats", 0.0) or (float(section.get("bars", 8)) * 4.0))
|
|
if beats <= 0:
|
|
continue
|
|
start = cursor
|
|
end = min(float(total_beats), start + beats)
|
|
if end <= start:
|
|
continue
|
|
timeline.append({
|
|
"index": index,
|
|
"kind": str(section.get("kind", "drop") or "drop").lower(),
|
|
"name": str(section.get("name", "") or ""),
|
|
"start": start,
|
|
"end": end,
|
|
})
|
|
cursor = end
|
|
if cursor >= float(total_beats):
|
|
break
|
|
|
|
if timeline:
|
|
return timeline
|
|
|
|
generic = [
|
|
("intro", 0.0, min(float(total_beats), 16.0)),
|
|
("build", min(float(total_beats), 16.0), min(float(total_beats), 32.0)),
|
|
("drop", min(float(total_beats), 32.0), min(float(total_beats), 48.0)),
|
|
("break", min(float(total_beats), 48.0), min(float(total_beats), 64.0)),
|
|
("drop", min(float(total_beats), 64.0), float(total_beats)),
|
|
]
|
|
for index, (kind, start, end) in enumerate(generic):
|
|
if end > start:
|
|
timeline.append({"index": index, "kind": kind, "name": kind.title(), "start": start, "end": end})
|
|
return timeline
|
|
|
|
|
|
def _build_positions_for_range(start: float, end: float, step: float, offset: float = 0.0) -> List[float]:
|
|
positions: List[float] = []
|
|
if step <= 0 or end <= start:
|
|
return positions
|
|
position = start + offset
|
|
while position < end - 0.05:
|
|
positions.append(round(position, 3))
|
|
position += step
|
|
return positions
|
|
|
|
|
|
def _build_audio_pattern_positions(total_beats: int = 16, config: Optional[Dict[str, Any]] = None) -> Dict[str, List[float]]:
    """Basic per-role beat positions for the audio fallback in arrangement.

    Returns a dict keyed by sample role ("kick", "snare", ...) whose values
    are sorted, de-duplicated beat positions inside [0, total_beats).
    Section-aware roles (FX, builds, peaks) are filled from the timeline
    produced by _iter_audio_fallback_sections.
    """
    # Claps land on beats 2 and 4 of each bar; loops retrigger every 16 beats.
    clap_positions = [beat for beat in range(total_beats) if beat % 4 in (1, 3)]
    loop_positions = [float(beat) for beat in range(0, max(total_beats, 16), 16)]
    vocal_positions = [float(beat) for beat in range(8, max(total_beats, 16), 16)]
    positions = {
        "kick": [float(beat) for beat in range(total_beats)],  # four-on-the-floor
        "snare": [float(beat) for beat in clap_positions],
        "hat": [round(0.5 + step * 0.5, 3) for step in range(total_beats * 2)],  # offbeat eighths
        "bass": loop_positions or [0.0],
        "perc_loop": loop_positions or [0.0],
        "vocal_loop": vocal_positions or [8.0],
        "perc_alt": [],
        "top_loop": [],
        "synth_loop": [],
        "synth_peak": [],
        "vocal_build": [],
        "vocal_peak": [],
        "crash_fx": [],
        "fill_fx": [],
        "snare_roll": [],
        "atmos_fx": [],
        "vocal_shot": [],
    }
    # Fill the section-aware roles following the section timeline.
    for section in _iter_audio_fallback_sections(total_beats, config):
        start = float(section["start"])
        end = float(section["end"])
        kind = str(section["kind"]).lower()
        section_length = max(0.0, end - start)

        # Atmospheres mark the start of calm sections.
        if kind in {"intro", "break", "outro"}:
            positions["atmos_fx"].append(round(start, 3))

        # Energetic sections layer tops, synths and alternate percussion.
        if kind in {"build", "drop"}:
            positions["top_loop"].extend(_build_positions_for_range(start, end, 16.0))
            positions["synth_loop"].append(round(start, 3))
            positions["perc_alt"].extend(_build_positions_for_range(start, end, 8.0, 4.0))

        if kind == "build":
            # Tension elements cluster toward the end of the build.
            positions["vocal_build"].append(round(max(start, end - min(8.0, section_length)), 3))
            positions["snare_roll"].append(round(max(start, end - min(4.0, section_length)), 3))
            positions["fill_fx"].append(round(max(start, end - 1.0), 3))
        elif kind == "drop":
            # Impact at the drop start, peak layers through the section.
            positions["crash_fx"].append(round(start, 3))
            positions["synth_peak"].extend(_build_positions_for_range(start, end, 16.0))
            positions["vocal_peak"].append(round(start, 3))
            positions["vocal_shot"].extend(_build_positions_for_range(start, end, 8.0, 1.5))
            if section_length >= 16.0:
                positions["fill_fx"].append(round(end - 1.0, 3))
        elif kind == "break":
            positions["vocal_loop"].append(round(start + min(8.0, max(0.0, section_length / 2.0)), 3))
            positions["fill_fx"].append(round(max(start, end - 1.0), 3))

    # De-duplicate, clamp to the song range, and sort every role.
    for key, values in positions.items():
        positions[key] = sorted({
            round(float(value), 3)
            for value in values
            if 0.0 <= float(value) < float(total_beats)
        })
    return positions
|
|
|
|
def _build_reference_audio_plan(config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Build (and cache on *config*) an arrangement plan guided by a reference track.

    Reads config["reference_track"]["path"], asks the reference listener for an
    arrangement plan, optionally derives transition layers via the audio
    resampler, applies per-section variation, and aligns the config bpm/key
    with the analyzed reference.

    Returns:
        The plan dict, or None when no reference is configured, the listener
        is unavailable, or the analysis fails.
    """
    if not isinstance(config, dict):
        return None

    reference_track = config.get("reference_track")
    reference_path = ""
    if isinstance(reference_track, dict):
        reference_path = str(reference_track.get("path", "") or "")
    if not reference_path:
        return None

    listener = get_reference_listener()
    if listener is None:
        return None

    sections = config.get("sections", []) or []
    bpm = float(config.get("bpm", 0.0) or 0.0)
    key = str(config.get("key", "") or "")
    variant_seed = config.get("variant_seed", None)

    try:
        plan = listener.build_arrangement_plan(reference_path, sections, bpm, key, variant_seed=variant_seed)
    except Exception as exc:
        logger.error("Error construyendo plan de referencia desde %s: %s", reference_path, exc)
        return None

    if not isinstance(plan, dict):
        logger.warning("Plan de referencia invalido para %s", reference_path)
        return None

    config["reference_audio_plan"] = plan

    # Align the session tempo/key with the analyzed reference.
    reference = plan.get("reference", {})
    ref_tempo = float(reference.get("tempo", 0.0) or 0.0)
    ref_key = str(reference.get("key", "") or "")
    if ref_tempo > 0:
        config["bpm"] = round(ref_tempo, 3)
    if ref_key:
        config["key"] = ref_key
        config["scale"] = "minor" if "m" in ref_key.lower() else "major"

    # BUGFIX: bind before the conditional block below. Previously this name
    # was only assigned inside the `try` under `if resampler is not None:`,
    # so the summary at the end raised NameError whenever the resampler was
    # unavailable or derivation failed before the assignment.
    derived_layers: List[Dict[str, Any]] = []

    resampler = get_audio_resampler()
    if resampler is not None:
        try:
            derived_layers = resampler.build_transition_layers(
                plan,
                sections,
                float(config.get("bpm", bpm) or bpm or ref_tempo or 0.0),
                variant_seed=variant_seed,
            )
            if derived_layers:
                plan.setdefault("layers", []).extend(derived_layers)
                plan["derived_layers"] = derived_layers
                logger.info(
                    "Derived %d transition layers: %s",
                    len(derived_layers),
                    [layer.get("name", "unnamed") for layer in derived_layers]
                )
                for layer in derived_layers:
                    logger.debug(
                        " - %s: positions=%s, volume=%.2f, source=%s",
                        layer.get("name", "unnamed"),
                        layer.get("positions", []),
                        float(layer.get("volume", 0.0)),
                        layer.get("source", "unknown")
                    )
        except Exception as exc:
            # Best effort: derived transitions are optional extras.
            logger.warning("No se pudieron derivar transiciones internas: %s", exc, exc_info=True)

    # Apply per-section variation for eligible roles.
    if sections:
        plan = _apply_section_variation_to_plan(plan, sections)

    total_layers = len(plan.get("layers", []))
    derived_count = len(derived_layers) if derived_layers else 0
    if total_layers > 0:
        logger.info(
            "Reference audio plan listo: %d capas totales (%d derivadas + %d base)",
            total_layers, derived_count, total_layers - derived_count
        )

    return plan
|
|
|
|
|
|
def _mute_tracks_for_audio_layers(ableton: "AbletonConnection", layer_names: List[str]) -> int:
    """Mute the existing tracks that the given audio layers replace.

    Maps each layer name through REFERENCE_AUDIO_MUTE_MAP to the track names
    it supersedes and mutes those by name. If nothing was muted that way,
    falls back to muting the first five tracks. Returns the mute count.
    """
    muted = 0
    target_names = set()
    for layer_name in layer_names:
        template_name = _match_audio_track_template(layer_name, REFERENCE_AUDIO_MUTE_MAP)
        if template_name:
            target_names.update(REFERENCE_AUDIO_MUTE_MAP.get(template_name, ()))

    if target_names:
        response = ableton.send_command("get_tracks")
        if not _is_error_response(response):
            # "get_tracks" may return a dict with a "tracks" list or a bare list.
            result = response.get("result", [])
            if isinstance(result, dict):
                tracks = result.get("tracks", [])
            elif isinstance(result, list):
                tracks = result
            else:
                tracks = []

            for track in tracks:
                # Track names are compared case-insensitively.
                track_name = str(track.get("name", "") or "").strip().upper()
                if track_name not in target_names:
                    continue
                try:
                    ableton.send_command("set_track_mute", {
                        "track_index": int(track.get("index", -1)),
                        "mute": True,
                    })
                    muted += 1
                except Exception:
                    # Best effort: one failed mute must not abort the pass.
                    pass

    if muted == 0:
        # Fallback: mute the first five tracks (typical backing slots).
        for track_index in range(5):
            try:
                ableton.send_command("set_track_mute", {"track_index": track_index, "mute": True})
                muted += 1
            except Exception:
                pass

    return muted
|
|
|
|
|
|
def _clamp_float(value: float, minimum: float, maximum: float) -> float:
|
|
return max(minimum, min(maximum, float(value)))
|
|
|
|
|
|
def _format_reference_audio_layer_result(materialized: Dict[str, Any]) -> str:
|
|
parts = [
|
|
f"Audio reference fallback listo ({materialized.get('reference_name', 'referencia')}, "
|
|
f"{materialized.get('reference_device', 'numpy')}): "
|
|
+ ", ".join(materialized.get("created_tracks", []))
|
|
]
|
|
if materialized.get("audio_mix_reports"):
|
|
parts.append(" | Mix: " + " / ".join(materialized.get("audio_mix_reports", [])))
|
|
parts.append(f" | MIDI silenciados: {int(materialized.get('muted_tracks', 0))}")
|
|
layer_errors = materialized.get("layer_errors", [])
|
|
if layer_errors:
|
|
parts.append(f" | Errores: {len(layer_errors)} layers fallaron")
|
|
return "".join(parts)
|
|
|
|
|
|
def _materialize_reference_audio_layers(
    ableton: "AbletonConnection",
    reference_audio_plan: Dict[str, Any],
    total_beats: int,
    return_mapping: Dict[str, int],
    mute_duplicates: bool = True,
    finalize_transport: bool = True,
) -> Dict[str, Any]:
    """Create one Ableton audio track per layer of a reference-audio plan.

    For every layer (base + derived) with a file path and positions, this
    creates an audio track, names/colors it, places the sample at the given
    beat positions, and applies the role's mix profile. Failed layers are
    collected instead of aborting the whole pass.

    Args:
        ableton: Active Ableton connection.
        reference_audio_plan: Plan with "layers" / "derived_layers" / "reference".
        total_beats: Song length in beats (used for the transport loop range).
        return_mapping: Return-track name -> index mapping for sends.
        mute_duplicates: Mute the tracks the new layers replace.
        finalize_transport: Reset loop/playhead after materialization.

    Returns:
        Dict with created track info, mix reports, layer metadata, mute count,
        reference identity, and per-layer errors.

    Raises:
        RuntimeError: when no layer at all could be materialized.
    """
    created_tracks: List[str] = []
    audio_mix_reports: List[str] = []
    audio_track_indices: Dict[str, int] = {}
    layer_metadata: Dict[str, Dict[str, Any]] = {}
    layer_names: List[str] = []
    layer_errors: List[str] = []

    # Combine base layers with derived transition layers, remembering which
    # names are derived for logging/reporting purposes.
    all_layers = list(reference_audio_plan.get("layers", []))
    derived_layer_names = set()
    derived_layers = reference_audio_plan.get("derived_layers", [])
    if derived_layers:
        derived_layer_names = {layer.get("name") for layer in derived_layers if isinstance(layer, dict)}
        all_layers.extend(derived_layers)

    logger.info(
        "Materializing %d audio layers (%d derived, %d base)",
        len(all_layers), len(derived_layer_names), len(all_layers) - len(derived_layer_names)
    )

    for layer_index, layer in enumerate(all_layers):
        if not isinstance(layer, dict):
            continue

        sample_path = str(layer.get("file_path", "") or "")
        positions = list(layer.get("positions", []) or [])
        track_name = str(layer.get("name", "AUDIO LAYER") or "AUDIO LAYER")
        if not sample_path or not positions:
            # A layer without media or placement cannot be materialized.
            logger.debug("Skipping layer %d (%s): missing path or positions", layer_index, track_name)
            continue

        is_derived = track_name in derived_layer_names
        layer_type = "DERIVED" if is_derived else "BASE"
        role = layer.get('role', '')

        # Check whether the layer carries per-section variants.
        section_variants = layer.get('section_variants', {})

        if section_variants:
            logger.debug("MATERIALIZE: role '%s' has %d section variants", role, len(section_variants))

            # Process each section variant.
            for section_start, variant_info in section_variants.items():
                # Filter the layer's samples according to the variant.
                variant_samples = _filter_samples_by_variant(
                    layer.get('samples', []),
                    variant_info.get('variant', 'standard')
                )

                if variant_samples != layer.get('samples', []):
                    logger.debug("VARIANT_MATERIALIZATION: role '%s' using variant samples for section starting at %.1f",
                                role, section_start)
                    # Use variant_samples for this section.
                    # NOTE: per-section filtering would be implemented here
                    # if the samples carried enough metadata.

        logger.debug(
            "[%s] Layer %d: %s, positions=%s, volume=%.2f",
            layer_type, layer_index, track_name, positions, float(layer.get("volume", 0.7))
        )

        try:
            create_response = ableton.send_command("create_audio_track", {"index": -1})
            if _is_error_response(create_response):
                raise RuntimeError(create_response.get("message", f"No se pudo crear {track_name}"))

            track_index = create_response.get("result", {}).get("index")
            if track_index is None:
                raise RuntimeError(f"Ableton no devolvio el indice para {track_name}")

            # Name, color and base volume for the new track.
            base_volume = float(layer.get("volume", 0.7))
            ableton.send_command("set_track_name", {"track_index": track_index, "name": track_name})
            ableton.send_command("set_track_color", {
                "track_index": track_index,
                "color": int(layer.get("color", 20)),
            })
            ableton.send_command("set_track_volume", {
                "track_index": track_index,
                "volume": _linear_to_live_slider(base_volume),
            })

            # Place the sample at every planned beat position.
            pattern_response = ableton.send_command("create_arrangement_audio_pattern", {
                "track_index": track_index,
                "file_path": sample_path,
                "positions": positions,
                "name": track_name,
            })
            if _is_error_response(pattern_response):
                raise RuntimeError(pattern_response.get("message", f"No se pudo crear audio para {track_name}"))

            # Apply the role's pan/sends/FX mix profile.
            mix_result = _apply_audio_track_mix(
                ableton,
                track_index,
                track_name,
                base_volume,
                return_mapping,
            )
            audio_mix_reports.append(
                f"{track_name}: pan {mix_result['pan']:+.2f}, sends {mix_result['sends']}, fx {mix_result['fx']}"
            )
            layer_names.append(track_name)
            created_tracks.append(f"{track_name}: {Path(sample_path).name}")
            audio_track_indices[track_name] = int(track_index)
            layer_metadata[track_name] = {
                "track_index": int(track_index),
                "volume": base_volume,
                "positions": positions,
                "color": int(layer.get("color", 20)),
            }
            logger.debug(
                "[%s] Created track %d: %s (pan=%.2f, sends=%d, fx=%d)",
                layer_type, track_index, track_name, mix_result['pan'], mix_result['sends'], mix_result['fx']
            )
        except Exception as layer_exc:
            # Collect the failure and keep materializing the remaining layers.
            error_msg = f"Layer {layer_index} ({track_name}) fallo: {layer_exc}"
            logger.error(error_msg)
            layer_errors.append(error_msg)
            continue

    if not created_tracks:
        error_summary = "; ".join(layer_errors) if layer_errors else "Sin layers validos"
        raise RuntimeError(f"No se pudieron crear capas de audio guiadas por referencia: {error_summary}")

    derived_created = sum(1 for name in layer_names if name in derived_layer_names)
    base_created = len(layer_names) - derived_created
    logger.info(
        "Materialization complete: %d tracks created (%d derived, %d base), %d errors",
        len(created_tracks), derived_created, base_created, len(layer_errors)
    )

    muted_tracks = _mute_tracks_for_audio_layers(ableton, layer_names) if mute_duplicates else 0
    if finalize_transport:
        # Disable loop over the full range and park the playhead at 0.
        ableton.send_command("loop_selection", {"start": 0, "length": float(total_beats), "enable": False})
        ableton.send_command("jump_to", {"time": 0})

    reference = reference_audio_plan.get("reference", {})
    return {
        "created_tracks": created_tracks,
        "audio_mix_reports": audio_mix_reports,
        "audio_track_indices": audio_track_indices,
        "layer_metadata": layer_metadata,
        "layer_names": layer_names,
        "muted_tracks": muted_tracks,
        "reference_name": reference.get("file_name", "referencia"),
        "reference_device": reference.get("device", "numpy"),
        "layer_errors": layer_errors,
    }
|
|
|
|
|
|
def _layer_has_activity_in_section(layer_data: Dict[str, Any], start: float, end: float) -> bool:
|
|
for position in layer_data.get("positions", []) or []:
|
|
try:
|
|
position_value = float(position)
|
|
except Exception:
|
|
continue
|
|
if start <= position_value < end:
|
|
return True
|
|
return False
|
|
|
|
|
|
def _reference_audio_section_factor(track_name: str, section_kind: str, section_name: str) -> float:
|
|
normalized = str(track_name or "").strip().upper()
|
|
kind = str(section_kind or "drop").lower()
|
|
is_peak = "peak" in str(section_name or "").lower()
|
|
|
|
if normalized in {"AUDIO KICK", "AUDIO CLAP", "AUDIO HAT", "AUDIO BASS LOOP", "AUDIO PERC MAIN", "AUDIO PERC ALT"}:
|
|
factors = {"intro": 0.82, "build": 0.92, "drop": 1.0, "break": 0.74, "outro": 0.78}
|
|
elif normalized == "AUDIO TOP LOOP":
|
|
factors = {"intro": 0.38, "build": 0.74, "drop": 1.0, "break": 0.5, "outro": 0.44}
|
|
elif normalized == "AUDIO SYNTH LOOP":
|
|
factors = {"intro": 0.0, "build": 0.64, "drop": 0.9, "break": 0.34, "outro": 0.24}
|
|
elif normalized == "AUDIO SYNTH PEAK":
|
|
factors = {"intro": 0.0, "build": 0.34, "drop": 0.86, "break": 0.0, "outro": 0.0}
|
|
elif normalized == "AUDIO VOCAL LOOP":
|
|
factors = {"intro": 0.0, "build": 0.58, "drop": 0.82, "break": 0.3, "outro": 0.0}
|
|
elif normalized == "AUDIO VOCAL BUILD":
|
|
factors = {"intro": 0.0, "build": 1.0, "drop": 0.42, "break": 0.38, "outro": 0.0}
|
|
elif normalized == "AUDIO VOCAL PEAK":
|
|
factors = {"intro": 0.0, "build": 0.26, "drop": 0.92, "break": 0.0, "outro": 0.0}
|
|
elif normalized in {"AUDIO CRASH FX", "AUDIO TRANSITION FILL", "AUDIO SNARE ROLL"}:
|
|
factors = {"intro": 0.0, "build": 1.0, "drop": 0.9, "break": 0.86, "outro": 0.2}
|
|
elif normalized == "AUDIO ATMOS":
|
|
factors = {"intro": 1.0, "build": 0.68, "drop": 0.46, "break": 0.94, "outro": 0.86}
|
|
elif normalized == "AUDIO VOCAL SHOT":
|
|
factors = {"intro": 0.0, "build": 0.56, "drop": 0.92, "break": 0.0, "outro": 0.0}
|
|
elif normalized == "AUDIO RESAMPLE REVERSE FX":
|
|
factors = {"intro": 0.0, "build": 1.0, "drop": 0.88, "break": 0.78, "outro": 0.32}
|
|
elif normalized == "AUDIO RESAMPLE RISER":
|
|
factors = {"intro": 0.0, "build": 1.0, "drop": 0.62, "break": 0.0, "outro": 0.0}
|
|
elif normalized == "AUDIO RESAMPLE DOWNLIFTER":
|
|
factors = {"intro": 0.0, "build": 0.22, "drop": 0.42, "break": 1.0, "outro": 0.88}
|
|
elif normalized == "AUDIO RESAMPLE STUTTER":
|
|
factors = {"intro": 0.0, "build": 0.96, "drop": 0.76, "break": 0.28, "outro": 0.0}
|
|
else:
|
|
factors = {"intro": 0.7, "build": 0.82, "drop": 1.0, "break": 0.62, "outro": 0.58}
|
|
|
|
factor = float(factors.get(kind, 0.78))
|
|
if is_peak and normalized in {"AUDIO SYNTH PEAK", "AUDIO VOCAL PEAK", "AUDIO TOP LOOP", "AUDIO CRASH FX"}:
|
|
factor *= 1.08
|
|
return factor
|
|
|
|
|
|
def _reference_audio_send_scales(track_name: str, section_kind: str, section_name: str) -> Dict[str, float]:
|
|
normalized = str(track_name or "").strip().upper()
|
|
kind = str(section_kind or "drop").lower()
|
|
name = str(section_name or "").lower()
|
|
|
|
scales = {
|
|
"space": 1.18 if kind == "break" else 1.06 if kind == "intro" else 0.94 if kind == "drop" else 1.0,
|
|
"echo": 1.22 if kind == "build" else 1.12 if "peak" in name else 0.9 if kind == "outro" else 1.0,
|
|
"heat": 1.14 if kind == "drop" else 0.88 if kind in {"intro", "break"} else 1.0,
|
|
"glue": 1.08 if kind == "drop" else 0.94 if kind == "intro" else 1.0,
|
|
"pan": 1.16 if kind == "drop" else 0.86 if kind == "break" else 1.0,
|
|
}
|
|
|
|
if normalized in {"AUDIO CRASH FX", "AUDIO TRANSITION FILL", "AUDIO SNARE ROLL"}:
|
|
scales["space"] += 0.08
|
|
scales["echo"] += 0.12
|
|
if normalized in {"AUDIO RESAMPLE REVERSE FX", "AUDIO RESAMPLE RISER", "AUDIO RESAMPLE DOWNLIFTER"}:
|
|
scales["space"] += 0.16
|
|
scales["echo"] += 0.14
|
|
scales["heat"] += 0.06 if kind in {"build", "drop"} else 0.0
|
|
if normalized == "AUDIO RESAMPLE STUTTER":
|
|
scales["echo"] += 0.2
|
|
scales["space"] += 0.06 if kind == "break" else 0.08 if kind == "drop" else 0.04
|
|
if normalized.startswith("AUDIO VOCAL"):
|
|
scales["echo"] += 0.08 if kind in {"build", "drop"} else 0.0
|
|
scales["space"] += 0.04 if kind == "break" else 0.0
|
|
if normalized == "AUDIO ATMOS":
|
|
scales["space"] += 0.1
|
|
scales["pan"] *= 0.9
|
|
|
|
return scales
|
|
|
|
|
|
def _build_reference_audio_performance(
    reference_audio_plan: Dict[str, Any],
    sections: List[Dict[str, Any]],
    materialized: Dict[str, Any],
) -> List[Dict[str, Any]]:
    """Build per-scene mixer snapshots for the materialized reference layers.

    For each section window, every active layer gets a track state with its
    section-scaled volume, pan and send levels. Returns a list of
    {"scene_index", "track_states"} dicts (sections with no active layer are
    omitted).
    """
    if not isinstance(reference_audio_plan, dict) or not sections:
        return []

    layers = materialized.get("layer_metadata", {})
    if not isinstance(layers, dict) or not layers:
        return []

    snapshots: List[Dict[str, Any]] = []
    cursor = 0.0
    for scene_index, section in enumerate(sections):
        # Each section occupies at least one beat on the timeline.
        beats = float(section.get("beats", 0.0) or (float(section.get("bars", 8)) * 4.0))
        window_start = cursor
        window_end = cursor + max(1.0, beats)
        cursor = window_end
        kind = str(section.get("kind", "drop")).lower()
        label = str(section.get("name", "")).lower()

        states: List[Dict[str, Any]] = []
        for track_name, layer_data in layers.items():
            if not _layer_has_activity_in_section(layer_data, window_start, window_end):
                continue

            base_volume = float(layer_data.get("volume", 0.7))
            profile = _resolve_audio_mix_profile(track_name, base_volume)
            gain = _reference_audio_section_factor(track_name, kind, label)
            send_scales = _reference_audio_send_scales(track_name, kind, label)

            states.append({
                "track_index": int(layer_data["track_index"]),
                "volume": round(_clamp_float(base_volume * gain, 0.0, 1.0), 3),
                "pan": round(_clamp_float(float(profile.get("pan", 0.0)) * send_scales["pan"], -1.0, 1.0), 3),
                "sends": {
                    send_name: round(
                        _clamp_float(float(send_value) * float(send_scales.get(str(send_name).lower(), 1.0)), 0.0, 1.0),
                        3,
                    )
                    for send_name, send_value in dict(profile.get("sends", {})).items()
                },
            })

        if states:
            snapshots.append({
                "scene_index": int(section.get("index", scene_index)),
                "track_states": states,
            })

    return snapshots
|
|
|
|
|
|
def _merge_performance_snapshots(base_snapshots: List[Dict[str, Any]], extra_snapshots: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
merged: Dict[int, Dict[str, Any]] = {}
|
|
for snapshot_list in (base_snapshots or [], extra_snapshots or []):
|
|
for item in snapshot_list:
|
|
if not isinstance(item, dict):
|
|
continue
|
|
scene_index = int(item.get("scene_index", len(merged)))
|
|
bucket = merged.setdefault(scene_index, {"scene_index": scene_index, "track_states": []})
|
|
bucket["track_states"].extend([
|
|
state for state in item.get("track_states", []) or []
|
|
if isinstance(state, dict)
|
|
])
|
|
|
|
return [merged[index] for index in sorted(merged)]
|
|
|
|
|
|
def _infer_m4l_pattern(genre: str, style: str = "") -> str:
|
|
genre_text = f"{genre} {style}".lower()
|
|
if "house" in genre_text:
|
|
return "house"
|
|
if "minimal" in genre_text:
|
|
return "minimal"
|
|
if "dnb" in genre_text or "drum-and-bass" in genre_text or "jungle" in genre_text:
|
|
return "breakbeat"
|
|
return "techno"
|
|
|
|
|
|
def setup_hybrid_m4l_sampler(genre: str, style: str = "", key: str = "", bpm: float = 0) -> str:
    """
    Prepare the hybrid M4L track with robust error handling.

    Verifies the M4L device files exist, selects drum/bass samples, creates an
    audio track, loads the device via try_load_m4l_device_on_track (verified
    load), and drives the sampler over UDP (clear, bpm, kit, pattern). On any
    failure the created track is deleted and the error re-raised.

    Returns a success message describing the loaded samples.

    Raises:
        RuntimeError: when M4L files/samples are missing, the track cannot be
            created, the device fails to load, or it ignores UDP commands.
    """
    # Verify the M4L device files exist before doing anything in Live.
    verify_result = verify_m4l_device_files_exist()
    if not verify_result["usable"]:
        missing = ", ".join(verify_result["missing"])
        logger.warning(f"M4L no disponible: faltan archivos {missing}")
        raise RuntimeError(f"M4L no disponible: archivos no encontrados ({missing})")

    try:
        sample_paths = _select_hybrid_sample_paths(genre, key, bpm)
    except Exception as sample_error:
        logger.warning(f"Error seleccionando samples para M4L: {sample_error}")
        raise RuntimeError(f"M4L no disponible: {sample_error}") from sample_error

    ableton = get_ableton_connection()
    track_index = None

    # Create the audio track that will host the M4L device.
    create_response = ableton.send_command("create_audio_track", {"index": -1})
    if _is_error_response(create_response):
        raise RuntimeError(f"M4L no disponible: {create_response.get('message', 'No se pudo crear track')}")

    track_index = create_response.get("result", {}).get("index")
    if track_index is None:
        raise RuntimeError("M4L no disponible: Ableton no devolvio indice del track")

    try:
        # Configure the track (name, color, base volume).
        ableton.send_command("set_track_name", {"track_index": track_index, "name": HYBRID_DRUM_TRACK_NAME})
        ableton.send_command("set_track_color", {"track_index": track_index, "color": HYBRID_DRUM_TRACK_COLOR})
        ableton.send_command("set_track_volume", {"track_index": track_index, "volume": _linear_to_live_slider(0.78)})

        # Load the M4L device with verification.
        load_result = try_load_m4l_device_on_track(ableton, track_index, M4L_DEVICE_NAME, verify_load=True)
        if not load_result.get("success"):
            error_msg = load_result.get("error", "Error desconocido cargando device")
            logger.warning(f"Fallo carga M4L: {error_msg}")
            raise RuntimeError(error_msg)

        # If the device could not be verified, continue with a warning.
        if not load_result.get("verified"):
            logger.warning("Device M4L cargado pero no verificado, continuando...")

        # Give M4L a moment to become ready.
        time.sleep(0.75)

        # Drive the sampler over UDP, counting successful commands.
        commands_sent = 0
        if send_m4l_sampler_command("clear_song"):
            commands_sent += 1
        if send_m4l_sampler_command("set_bpm", int(round(bpm)) if bpm else 128):
            commands_sent += 1
        if send_m4l_sampler_command(
            "load_drum_kit",
            _udp_safe_path(sample_paths["kick"]),
            _udp_safe_path(sample_paths["snare"]),
            _udp_safe_path(sample_paths["hat"]),
            _udp_safe_path(sample_paths["bass"]),
        ):
            commands_sent += 1
        if send_m4l_sampler_command("generate_pattern", _infer_m4l_pattern(genre, style)):
            commands_sent += 1

        # If no UDP command got through, the device is likely unresponsive.
        if commands_sent == 0:
            logger.warning("Device M4L no responde a comandos UDP")
            raise RuntimeError("Device M4L no responde a comandos UDP")

        logger.info(f"M4L listo: {commands_sent} comandos enviados")
        return (
            f"Hibrido M4L listo en track {track_index}: "
            f"{Path(sample_paths['kick']).name}, {Path(sample_paths['snare']).name}, "
            f"{Path(sample_paths['hat']).name}, {Path(sample_paths['bass']).name}"
        )

    except Exception as e:
        # Cleanup: delete the track if anything failed after creation.
        if track_index is not None:
            try:
                ableton.send_command("delete_track", {"track_index": track_index})
            except Exception:
                pass
        logger.error(f"Error en setup_hybrid_m4l_sampler: {e}")
        raise
|
|
|
|
def setup_audio_sample_fallback(
    genre: str,
    style: str = "",
    key: str = "",
    bpm: float = 0,
    total_beats: int = 16,
    config: Optional[Dict[str, Any]] = None,
) -> str:
    """Create an audible backing with real audio clips from the local library.

    Two paths:
      * If ``config["reference_audio_plan"]`` is a dict with ``layers``, those
        layers are materialized directly and the function returns early.
      * Otherwise one arrangement audio track per entry in
        ``AUDIO_FALLBACK_TRACK_SPECS`` (mandatory; failures raise) plus any
        ``AUDIO_OPTIONAL_FALLBACK_TRACK_SPECS`` entry with a resolvable sample
        (best-effort; failures are skipped) is created, mixed, and the matching
        MIDI layers are muted.

    Args:
        genre/style/key/bpm: hints forwarded to the sample-path resolver.
        total_beats: arrangement span used for pattern positions and loop reset.
        config: optional generation config (returns mapping, pattern positions,
            reference audio plan).

    Returns:
        Human-readable summary of created tracks, mix reports and muted MIDI.

    Raises:
        RuntimeError: when a mandatory track/pattern cannot be created, or when
            no audio tracks at all could be produced.
    """
    ableton = get_ableton_connection()
    created_tracks = []
    audio_mix_reports = []
    reference_audio_plan = None
    # Send routing is only derivable from a dict config; otherwise no sends.
    return_mapping = _build_return_send_mapping(config) if isinstance(config, dict) else {}
    if isinstance(config, dict):
        reference_audio_plan = config.get("reference_audio_plan")

    # Fast path: a prepared reference-audio plan replaces the generic fallback.
    if isinstance(reference_audio_plan, dict) and reference_audio_plan.get("layers"):
        materialized = _materialize_reference_audio_layers(
            ableton,
            reference_audio_plan,
            total_beats,
            return_mapping,
            mute_duplicates=True,
            finalize_transport=True,
        )
        return _format_reference_audio_layer_result(materialized)

    sample_paths = _build_audio_fallback_sample_paths(genre, key, bpm)
    positions = _build_audio_pattern_positions(total_beats, config)
    created_layer_names = []

    # Mandatory layers: any creation failure aborts the whole fallback.
    for track_name, sample_key, color, volume in AUDIO_FALLBACK_TRACK_SPECS:
        sample_path = sample_paths.get(sample_key, "")
        if not sample_path:
            continue

        create_response = ableton.send_command("create_audio_track", {"index": -1})
        if _is_error_response(create_response):
            raise RuntimeError(create_response.get("message", f"No se pudo crear {track_name}"))

        track_index = create_response.get("result", {}).get("index")
        if track_index is None:
            raise RuntimeError(f"Ableton no devolvio el indice para {track_name}")

        ableton.send_command("set_track_name", {"track_index": track_index, "name": track_name})
        ableton.send_command("set_track_color", {"track_index": track_index, "color": color})
        ableton.send_command("set_track_volume", {"track_index": track_index, "volume": _linear_to_live_slider(volume)})

        pattern_response = ableton.send_command("create_arrangement_audio_pattern", {
            "track_index": track_index,
            "file_path": sample_path,
            "positions": positions.get(sample_key, [0.0]),
            "name": track_name,
        })
        if _is_error_response(pattern_response):
            raise RuntimeError(pattern_response.get("message", f"No se pudo crear audio para {track_name}"))

        mix_result = _apply_audio_track_mix(ableton, track_index, track_name, float(volume), return_mapping)
        audio_mix_reports.append(
            f"{track_name}: pan {mix_result['pan']:+.2f}, sends {mix_result['sends']}, fx {mix_result['fx']}"
        )
        created_tracks.append(f"{track_name}: {Path(sample_path).name}")
        created_layer_names.append(track_name)

    # Optional layers: best-effort — failures skip the layer instead of raising.
    for optional_name, optional_key, color, volume in AUDIO_OPTIONAL_FALLBACK_TRACK_SPECS:
        sample_path = sample_paths.get(optional_key, "")
        if not sample_path:
            continue

        create_response = ableton.send_command("create_audio_track", {"index": -1})
        if _is_error_response(create_response):
            continue

        track_index = create_response.get("result", {}).get("index")
        if track_index is None:
            continue

        ableton.send_command("set_track_name", {"track_index": track_index, "name": optional_name})
        ableton.send_command("set_track_color", {"track_index": track_index, "color": color})
        ableton.send_command("set_track_volume", {"track_index": track_index, "volume": _linear_to_live_slider(volume)})
        ableton.send_command("create_arrangement_audio_pattern", {
            "track_index": track_index,
            "file_path": sample_path,
            "positions": positions.get(optional_key, [0.0]),
            "name": optional_name,
        })
        mix_result = _apply_audio_track_mix(ableton, track_index, optional_name, float(volume), return_mapping)
        audio_mix_reports.append(
            f"{optional_name}: pan {mix_result['pan']:+.2f}, sends {mix_result['sends']}, fx {mix_result['fx']}"
        )
        created_tracks.append(f"{optional_name}: {Path(sample_path).name}")
        created_layer_names.append(optional_name)

    # Silence the MIDI tracks that the new audio layers replace.
    muted = _mute_tracks_for_audio_layers(ableton, created_layer_names)

    # Reset transport: disable looping over the rendered span and rewind to 0.
    ableton.send_command("loop_selection", {"start": 0, "length": float(total_beats), "enable": False})
    ableton.send_command("jump_to", {"time": 0})

    if not created_tracks:
        raise RuntimeError("No se pudieron crear tracks de audio con la libreria local")

    return (
        "Audio fallback listo en arrangement: "
        + ", ".join(created_tracks)
        + (" | Mix: " + " / ".join(audio_mix_reports) if audio_mix_reports else "")
        + f" | MIDI silenciados: {muted}"
    )
|
|
|
def _sleep_until(target_time: float):
|
|
while True:
|
|
remaining = target_time - time.monotonic()
|
|
if remaining <= 0:
|
|
return
|
|
time.sleep(min(0.25, remaining))
|
|
|
|
|
|
def _build_return_send_mapping(config: Dict[str, Any]) -> Dict[str, int]:
|
|
mapping: Dict[str, int] = {}
|
|
for index, item in enumerate(config.get("returns", []) or []):
|
|
if not isinstance(item, dict):
|
|
continue
|
|
send_key = str(item.get("send_key", item.get("name", ""))).strip().lower()
|
|
if send_key:
|
|
mapping[send_key] = index
|
|
return mapping
|
|
|
|
|
|
def _normalize_track_name(value: Any) -> str:
|
|
return " ".join(str(value or "").strip().upper().split())
|
|
|
|
|
|
def _extract_tracks_payload(response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Pull the track list out of a get_tracks response, tolerating both payload shapes."""
    if _is_error_response(response):
        return []
    payload = response.get("result", [])
    # Wrapped shape: {"tracks": [...]}; legacy shape: a bare list.
    if isinstance(payload, dict):
        return list(payload.get("tracks", []) or [])
    return payload if isinstance(payload, list) else []
|
|
|
|
|
|
def _build_config_track_bus_map(config: Dict[str, Any]) -> Dict[str, str]:
    """Map normalized config track names to their declared bus keys.

    Entries missing a name or a bus are silently skipped.
    """
    result: Dict[str, str] = {}
    for entry in config.get("tracks", []) or []:
        if not isinstance(entry, dict):
            continue
        name_key = _normalize_track_name(entry.get("name", ""))
        bus = str(entry.get("bus", "") or "").strip().lower()
        if name_key and bus:
            result[name_key] = bus
    return result
|
|
|
|
|
|
def _match_audio_track_template(track_name: str, mapping: Dict[str, Any]) -> Optional[str]:
    """Resolve a track name to a template key in *mapping*.

    Tries an exact normalized match first, then treats suffixed copies such as
    "TEMPLATE (2)" as instances of "TEMPLATE". Returns None when nothing fits.
    """
    candidate = _normalize_track_name(track_name)
    if not candidate:
        return None
    if candidate in mapping:
        return candidate

    # Longest templates first so the most specific prefix wins.
    templates = sorted(mapping.keys(), key=len, reverse=True)
    return next(
        (name for name in templates if candidate.startswith(f"{name} (")),
        None,
    )
|
|
|
|
|
|
def _resolve_bus_key_for_track(track_name: str, config_track_bus_map: Dict[str, str]) -> Optional[str]:
    """Pick the mix-bus key for a track.

    Resolution order: explicit config mapping, the AUDIO_TRACK_BUS_KEYS template
    table, then name-prefix heuristics for generated "AUDIO ..." layers.
    Returns None when no rule applies.
    """
    name = _normalize_track_name(track_name)
    if not name:
        return None
    if name in config_track_bus_map:
        return config_track_bus_map[name]

    template = _match_audio_track_template(name, AUDIO_TRACK_BUS_KEYS)
    if template:
        return AUDIO_TRACK_BUS_KEYS[template]

    # Heuristic fallbacks keyed on the generated layer-name prefix.
    if name.startswith("AUDIO VOCAL"):
        return "vocal"
    if name.startswith("AUDIO BASS"):
        return "bass"
    if name.startswith("AUDIO "):
        if any(token in name for token in ("ATMOS", "RISER", "IMPACT", "FX")):
            return "fx"
        return "music"
    return None
|
|
|
|
|
|
def _normalize_device_key(name: Any) -> str:
|
|
return "".join(char for char in str(name or "").strip().lower() if char.isalnum())
|
|
|
|
|
|
def _build_return_device_lookup(ableton: "AbletonConnection", config: Dict[str, Any]) -> Dict[int, Dict[str, List[int]]]:
    """Index devices on every configured return track.

    Returns return_index -> normalized device name -> list of device indices.
    Return tracks that cannot be queried are left out of the result.
    """
    lookup: Dict[int, Dict[str, List[int]]] = {}
    return_count = len(config.get("returns", []) or [])
    for return_index in range(return_count):
        try:
            response = ableton.send_command("get_devices", {
                "track_type": "return",
                "track_index": int(return_index),
            })
        except Exception:
            continue

        by_name: Dict[str, List[int]] = {}
        for device in _extract_devices_payload(response):
            key = _normalize_device_key(device.get("name", ""))
            if key:
                by_name.setdefault(key, []).append(int(device.get("index", 0)))
        lookup[int(return_index)] = by_name
    return lookup
|
|
|
|
|
|
def _build_track_device_lookup(ableton: "AbletonConnection", track_indices: List[int]) -> Dict[int, Dict[str, List[int]]]:
    """Index devices on regular MIDI/audio tracks.

    Returns track_index -> normalized device name -> list of device indices;
    the counterpart of _build_return_device_lookup for non-return tracks.
    Tracks that cannot be queried are simply absent from the result.
    """
    lookup: Dict[int, Dict[str, List[int]]] = {}
    for raw_index in track_indices:
        try:
            response = ableton.send_command("get_devices", {
                "track_index": int(raw_index),
            })
        except Exception:
            continue

        by_name: Dict[str, List[int]] = {}
        for device in _extract_devices_payload(response):
            key = _normalize_device_key(device.get("name", ""))
            if key:
                by_name.setdefault(key, []).append(int(device.get("index", 0)))
        lookup[int(raw_index)] = by_name
    return lookup
|
|
|
|
|
|
def _build_bus_device_lookup(ableton: "AbletonConnection", bus_mapping: Dict[str, Dict[str, Any]]) -> Dict[int, Dict[str, List[int]]]:
    """Index devices on each mix-bus track.

    Returns bus track_index -> normalized device name -> list of device
    indices. Buses without a valid track index, or whose devices cannot be
    queried, are skipped.
    """
    lookup: Dict[int, Dict[str, List[int]]] = {}
    for bus_info in bus_mapping.values():
        bus_track = int(bus_info.get("track_index", -1))
        if bus_track < 0:
            continue
        try:
            response = ableton.send_command("get_devices", {
                "track_index": bus_track,
            })
        except Exception:
            continue

        by_name: Dict[str, List[int]] = {}
        for device in _extract_devices_payload(response):
            key = _normalize_device_key(device.get("name", ""))
            if key:
                by_name.setdefault(key, []).append(int(device.get("index", 0)))
        lookup[bus_track] = by_name
    return lookup
|
|
|
|
|
|
def _resolve_audio_mix_profile(track_name: str, base_volume: float) -> Dict[str, Any]:
    """Return a mix profile (volume/pan/sends/fx_chain) for a track.

    Looks the track up in AUDIO_LAYER_MIX_PROFILES (template-aware), then fills
    defaults: volume falls back to *base_volume* and is clamped to [0, 1];
    pan defaults to 0.0, sends to {} and fx_chain to [].
    """
    normalized = _normalize_track_name(track_name)
    template = _match_audio_track_template(normalized, AUDIO_LAYER_MIX_PROFILES)
    source = AUDIO_LAYER_MIX_PROFILES.get(template if template else normalized, {})
    profile = dict(source)

    if "volume" not in profile:
        profile["volume"] = float(base_volume)
    profile["volume"] = _clamp_float(float(profile["volume"]), 0.0, 1.0)

    for field, default in (("pan", 0.0), ("sends", {}), ("fx_chain", [])):
        profile.setdefault(field, default)
    return profile
|
|
|
|
|
|
def _extract_devices_payload(response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Pull the device list out of a get_devices response, tolerating both payload shapes."""
    if _is_error_response(response):
        return []
    payload = response.get("result", [])
    # Wrapped shape: {"devices": [...]}; legacy shape: a bare list.
    if isinstance(payload, dict):
        return list(payload.get("devices", []) or [])
    return payload if isinstance(payload, list) else []
|
|
|
|
|
|
def _load_audio_fx_chain(
    ableton: "AbletonConnection",
    track_index: int,
    fx_chain: List[Dict[str, Any]],
    track_type: str = "track",
) -> int:
    """Load a chain of effect devices onto a track and set their parameters.

    Each spec in *fx_chain* is a dict with a "device" name and an optional
    "parameters" mapping. The newly loaded device's index is inferred by
    diffing the device list before/after the load; when the count did not grow
    (Live may merge or refuse), falls back to the last device whose name
    contains the requested name. Parameter failures are swallowed (best-effort).

    Args:
        ableton: active Ableton connection.
        track_index: target track index.
        fx_chain: list of {"device": str, "parameters": {name: value}} specs.
        track_type: forwarded to commands when not the default "track"
            (e.g. "return" or "master").

    Returns:
        Number of devices successfully loaded and configured.
    """
    if not isinstance(fx_chain, list) or not fx_chain:
        return 0

    loaded = 0
    base_params = {"track_index": track_index}
    # Only non-default track types are sent explicitly.
    if track_type and track_type != "track":
        base_params["track_type"] = track_type

    for spec in fx_chain:
        if not isinstance(spec, dict):
            continue
        device_name = str(spec.get("device", "") or "").strip()
        if not device_name:
            continue

        # Snapshot the device list so the new device's index can be inferred.
        before_devices = _extract_devices_payload(ableton.send_command("get_devices", dict(base_params)))
        before_count = len(before_devices)
        load_params = dict(base_params)
        load_params["device_name"] = device_name
        load_response = ableton.send_command("load_device", load_params)
        if _is_error_response(load_response):
            continue

        after_devices = _extract_devices_payload(ableton.send_command("get_devices", dict(base_params)))
        if not after_devices:
            continue

        if len(after_devices) > before_count:
            # Count grew: devices append at the end, so take the last slot.
            device_index = len(after_devices) - 1
        else:
            # Count unchanged: fall back to a case-insensitive name match.
            matching = [item for item in after_devices if device_name.lower() in str(item.get("name", "")).lower()]
            if not matching:
                continue
            device_index = int(matching[-1].get("index", len(after_devices) - 1))

        # Best-effort parameter application; individual failures are ignored.
        for param_name, value in dict(spec.get("parameters", {})).items():
            try:
                parameter_params = dict(base_params)
                parameter_params.update({
                    "device_index": device_index,
                    "parameter": str(param_name),
                    "value": float(value),
                })
                ableton.send_command("set_device_parameter", parameter_params)
            except Exception:
                pass
        loaded += 1

    return loaded
|
|
|
|
|
|
def apply_master_chain(ableton: "AbletonConnection", config: Dict[str, Any]) -> str:
    """Apply the config's master-track volume and device chain.

    Reads ``config["master"]``: sets master volume when given, then walks
    ``device_chain``, reusing an already-present device (matched by name,
    case-insensitive substring) or loading it when absent, and applies each
    spec's parameters best-effort.

    Args:
        ableton: active Ableton connection.
        config: generation config; only its "master" section is used.

    Returns:
        A summary string "Master chain: N devices nuevos, M reutilizados",
        or "" when there is nothing to apply (no chain and no volume).
    """
    master_spec = config.get("master", {}) or {}
    if not isinstance(master_spec, dict):
        return ""

    device_chain = [item for item in master_spec.get("device_chain", []) or [] if isinstance(item, dict)]
    volume = master_spec.get("volume", None)
    base_params = {"track_type": "master", "track_index": 0}

    # Log master profile if present
    master_profile_name = master_spec.get("profile", "default")
    logger.info("Applying master profile: %s", master_profile_name)

    if volume is not None:
        try:
            # NOTE(review): value is sent raw here, not through
            # _linear_to_live_slider as elsewhere — confirm this is intended.
            ableton.send_command("set_track_volume", {
                "track_type": "master",
                "track_index": 0,
                "volume": float(volume),
            })
            logger.info("Master volume: %.3f", float(volume))
        except Exception:
            pass

    loaded = 0
    reused = 0
    existing_devices = _extract_devices_payload(ableton.send_command("get_devices", dict(base_params)))

    for spec in device_chain:
        device_name = str(spec.get("device", "") or "").strip()
        if not device_name:
            continue

        # Prefer reusing a device already on the master chain.
        matching = [
            item for item in existing_devices
            if device_name.lower() in str(item.get("name", "")).lower()
        ]

        if matching:
            device_index = int(matching[-1].get("index", 0))
            reused += 1
        else:
            load_params = dict(base_params)
            load_params["device_name"] = device_name
            load_response = ableton.send_command("load_device", load_params)
            if _is_error_response(load_response):
                continue
            # Re-query so subsequent specs see the freshly loaded device too.
            existing_devices = _extract_devices_payload(ableton.send_command("get_devices", dict(base_params)))
            matching = [
                item for item in existing_devices
                if device_name.lower() in str(item.get("name", "")).lower()
            ]
            if not matching:
                continue
            device_index = int(matching[-1].get("index", 0))
            loaded += 1

        # Best-effort parameter application; individual failures are ignored.
        for param_name, value in dict(spec.get("parameters", {})).items():
            try:
                parameter_params = dict(base_params)
                parameter_params.update({
                    "device_index": device_index,
                    "parameter": str(param_name),
                    "value": float(value),
                })
                ableton.send_command("set_device_parameter", parameter_params)
                # Log limiter gain specifically
                if "limiter" in device_name.lower() and "gain" in str(param_name).lower():
                    logger.info("Master limiter gain: %.3f", float(value))
            except Exception:
                pass

    if not device_chain and volume is None:
        return ""
    return f"Master chain: {loaded} devices nuevos, {reused} reutilizados"
|
|
|
|
|
|
def _apply_master_state(ableton: "AbletonConnection", master_state: Dict[str, Any]) -> int:
    """
    Apply master chain state from performance snapshot.

    Handles device_parameters for master track devices.
    Returns count of applied changes.

    Args:
        ableton: active Ableton connection.
        master_state: dict with optional "volume" and "device_parameters"
            (each entry: device_index or device_name, parameter, value).

    Notes:
        * Volume is mapped through _linear_to_live_slider here, unlike the
          master path in _apply_performance_snapshot which sends the raw
          value — NOTE(review): confirm which mapping is intended.
        * All commands are best-effort; failures are silently skipped.
    """
    if not isinstance(master_state, dict):
        return 0

    applied = 0
    base_params = {"track_type": "master", "track_index": 0}

    # Apply volume if specified
    if "volume" in master_state:
        try:
            ableton.send_command("set_track_volume", {
                "track_type": "master",
                "track_index": 0,
                "volume": _linear_to_live_slider(float(master_state["volume"])),
            })
            applied += 1
        except Exception:
            pass

    # Apply device parameters
    for device_state in master_state.get("device_parameters", []) or []:
        if not isinstance(device_state, dict):
            continue

        device_index = device_state.get("device_index", None)
        parameter_name = str(device_state.get("parameter", "") or "").strip()
        if not parameter_name:
            continue

        # If device_index not provided, try to find by device_name
        if device_index is None:
            device_name = _normalize_device_key(device_state.get("device_name", ""))
            if not device_name:
                continue
            try:
                response = ableton.send_command("get_devices", dict(base_params))
                devices = _extract_devices_payload(response)
                # First device whose name contains the normalized key wins.
                for device in devices:
                    if device_name in str(device.get("name", "")).lower():
                        device_index = int(device.get("index", 0))
                        break
            except Exception:
                continue

        if device_index is None:
            continue

        try:
            parameter_params = dict(base_params)
            parameter_params.update({
                "device_index": int(device_index),
                "parameter": parameter_name,
                "value": float(device_state.get("value", 0.0)),
            })
            ableton.send_command("set_device_parameter", parameter_params)
            applied += 1
        except Exception:
            pass

    return applied
|
|
|
|
|
|
def _apply_audio_track_mix(
    ableton: "AbletonConnection",
    track_index: int,
    track_name: str,
    base_volume: float,
    return_mapping: Dict[str, int],
) -> Dict[str, Any]:
    """Apply the resolved mix profile (volume, pan, sends, fx) to one audio track.

    Args:
        ableton: active Ableton connection.
        track_index: target track index.
        track_name: used to look up the layer's mix profile.
        base_volume: fallback volume when the profile defines none.
        return_mapping: send name (lowercase) -> return-track index.

    Returns:
        {"pan": float, "sends": int applied, "fx": int devices loaded}.
    """
    profile = _resolve_audio_mix_profile(track_name, base_volume)
    applied_sends = 0

    ableton.send_command("set_track_volume", {
        "track_index": track_index,
        "volume": _linear_to_live_slider(float(profile.get("volume", base_volume))),
    })
    ableton.send_command("set_track_pan", {
        "track_index": track_index,
        "pan": float(profile.get("pan", 0.0)),
    })

    # Sends whose name is not in return_mapping are silently skipped.
    for send_name, send_value in dict(profile.get("sends", {})).items():
        send_index = return_mapping.get(str(send_name).lower(), None)
        if send_index is None:
            continue
        try:
            ableton.send_command("set_track_send", {
                "track_index": track_index,
                "send_index": int(send_index),
                "value": float(send_value),
            })
            applied_sends += 1
        except Exception:
            pass

    loaded_fx = _load_audio_fx_chain(ableton, track_index, list(profile.get("fx_chain", []) or []))
    return {
        "pan": float(profile.get("pan", 0.0)),
        "sends": applied_sends,
        "fx": loaded_fx,
    }
|
|
|
|
|
|
def _ensure_mix_bus_tracks(ableton: "AbletonConnection", config: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Create (or reuse) one audio track per bus spec in config["buses"].

    For each spec the track is found by normalized name or created, then its
    name, color, calibrated volume, pan and (best-effort) monitoring mode are
    set. The bus FX chain is loaded only for freshly created tracks or tracks
    that currently have no devices, so reruns don't stack duplicates.

    Returns:
        bus_key -> {"track_index": int, "name": str, "created": bool};
        empty dict when the config declares no buses.
    """
    bus_specs = [item for item in config.get("buses", []) or [] if isinstance(item, dict)]
    if not bus_specs:
        return {}

    tracks = _extract_tracks_payload(ableton.send_command("get_tracks"))
    # Existing tracks indexed by normalized name for reuse detection.
    existing_by_name = {
        _normalize_track_name(track.get("name", "")): track
        for track in tracks
        if isinstance(track, dict)
    }

    bus_mapping: Dict[str, Dict[str, Any]] = {}
    for bus_spec in bus_specs:
        bus_key = str(bus_spec.get("key", "") or "").strip().lower()
        bus_name = str(bus_spec.get("name", bus_key.upper()) or bus_key.upper()).strip()
        if not bus_key or not bus_name:
            continue

        normalized_name = _normalize_track_name(bus_name)
        existing = existing_by_name.get(normalized_name)
        created_now = False

        if existing is None:
            create_response = ableton.send_command("create_audio_track", {"index": -1})
            if _is_error_response(create_response):
                continue
            track_index = create_response.get("result", {}).get("index")
            if track_index is None:
                continue
            created_now = True
        else:
            track_index = int(existing.get("index", -1))
            if track_index < 0:
                continue

        ableton.send_command("set_track_name", {"track_index": track_index, "name": bus_name})
        ableton.send_command("set_track_color", {
            "track_index": track_index,
            "color": int(bus_spec.get("color", 58)),
        })
        calibrated_volume = float(bus_spec.get("volume", 0.8))
        ableton.send_command("set_track_volume", {
            "track_index": track_index,
            "volume": _linear_to_live_slider_bus(calibrated_volume),
        })
        logger.info("Bus %s calibrated volume: %.3f", bus_name, calibrated_volume)
        ableton.send_command("set_track_pan", {
            "track_index": track_index,
            "pan": float(bus_spec.get("pan", 0.0)),
        })
        # Monitoring is best-effort: older bridges may not support the command.
        try:
            ableton.send_command("set_track_monitoring", {
                "track_index": track_index,
                "mode": str(bus_spec.get("monitoring", "in")),
            })
        except Exception:
            pass

        # Only load the FX chain when the bus has no devices yet.
        devices = _extract_devices_payload(ableton.send_command("get_devices", {"track_index": track_index}))
        if created_now or not devices:
            _load_audio_fx_chain(ableton, track_index, list(bus_spec.get("fx_chain", []) or []))

        bus_mapping[bus_key] = {
            "track_index": int(track_index),
            "name": bus_name,
            "created": created_now,
        }

    return bus_mapping
|
|
|
|
|
|
def _route_track_to_mix_bus(ableton: "AbletonConnection", track_index: int, bus_name: str) -> bool:
    """Point a track's output at the named mix bus.

    Returns True when the track is already routed there or the re-route
    succeeds; False when routing info is unavailable, the bus is not among the
    available output options, or the set command fails.
    """
    info = ableton.send_command("get_track_routing", {"track_index": int(track_index)})
    if _is_error_response(info):
        return False

    routing = info.get("result", {})
    target = _normalize_track_name(bus_name)
    # Already routed to the requested bus: nothing to do.
    if _normalize_track_name(routing.get("current_output_routing", "")) == target:
        return True

    chosen = None
    for option in routing.get("available_output_routing_types", []) or []:
        if _normalize_track_name(option) == target:
            chosen = option
            break
    if not chosen:
        return False

    outcome = ableton.send_command("set_track_output_routing", {
        "track_index": int(track_index),
        "routing_name": chosen,
    })
    return not _is_error_response(outcome)
|
|
|
|
|
|
def apply_mix_bus_architecture(ableton: "AbletonConnection", config: Dict[str, Any]) -> str:
    """Create/reuse the configured mix buses and route matching tracks to them.

    Ensures the bus tracks exist, then walks every non-bus track, resolves its
    bus key (config mapping first, then name heuristics) and re-routes its
    output. Tracks without a resolvable bus are left untouched.

    Args:
        ableton: active Ableton connection.
        config: generation config with "buses" and "tracks" sections.

    Returns:
        Summary string with bus/routing counts, or "" when no buses are
        configured.
    """
    bus_mapping = _ensure_mix_bus_tracks(ableton, config)
    if not bus_mapping:
        return ""

    config_track_bus_map = _build_config_track_bus_map(config)
    # Bus tracks themselves must never be re-routed into another bus.
    bus_track_indices = {int(item["track_index"]) for item in bus_mapping.values()}
    tracks = _extract_tracks_payload(ableton.send_command("get_tracks"))

    routed = 0
    skipped = 0
    for track in tracks:
        if not isinstance(track, dict):
            continue
        track_index = int(track.get("index", -1))
        if track_index < 0 or track_index in bus_track_indices:
            continue

        bus_key = _resolve_bus_key_for_track(track.get("name", ""), config_track_bus_map)
        if not bus_key or bus_key not in bus_mapping:
            continue

        if _route_track_to_mix_bus(ableton, track_index, bus_mapping[bus_key]["name"]):
            routed += 1
        else:
            skipped += 1

    created_count = sum(1 for item in bus_mapping.values() if item.get("created"))
    reused_count = len(bus_mapping) - created_count
    return (
        f"Mix buses: {len(bus_mapping)} buses "
        f"({created_count} nuevos, {reused_count} reutilizados), "
        f"{routed} routings, {skipped} omitidos"
    )
|
|
|
|
|
|
def _log_gain_staging_summary(config: Dict[str, Any]) -> None:
    """Log the gain staging summary from the config."""
    summary = config.get('gain_staging_summary', {})
    if not summary:
        return

    logger.info("=== Gain Staging Summary ===")
    rows = (
        ("Master profile: %s", summary.get('master_profile_used')),
        ("Style adjustments: %s", summary.get('style_adjustments_applied')),
        ("Bus volumes: %s", summary.get('bus_volumes')),
        ("Track volume overrides: %d", summary.get('track_volume_overrides_count', 0)),
        ("Peak reductions: %d", summary.get('peak_reductions_applied_count', 0)),
        ("Headroom target: %s dB", summary.get('headroom_target_db')),
    )
    for template, value in rows:
        logger.info(template, value)

    issues = summary.get('warnings', [])
    if issues:
        logger.warning("Gain staging warnings: %s", issues)
|
|
|
|
|
def _iter_device_parameter_states(items: Any) -> List[Dict[str, Any]]:
|
|
flattened: List[Dict[str, Any]] = []
|
|
for item in items or []:
|
|
if not isinstance(item, dict):
|
|
continue
|
|
if "parameter" in item and "value" in item:
|
|
flattened.append(item)
|
|
continue
|
|
device_name = str(item.get("device_name", "") or item.get("name", "")).strip()
|
|
for parameter_name, value in dict(item.get("parameters", {})).items():
|
|
flattened.append({
|
|
"device_name": device_name,
|
|
"parameter": parameter_name,
|
|
"value": value,
|
|
})
|
|
return flattened
|
|
|
|
|
|
def _apply_performance_snapshot(
    ableton: "AbletonConnection",
    snapshot: Dict[str, Any],
    return_mapping: Dict[str, int],
    return_device_lookup: Optional[Dict[int, Dict[str, List[int]]]] = None,
    track_device_lookup: Optional[Dict[int, Dict[str, List[int]]]] = None,
    bus_device_lookup: Optional[Dict[int, Dict[str, List[int]]]] = None,
    master_device_lookup: Optional[Dict[str, List[int]]] = None,
    bus_mapping: Optional[Dict[str, Dict[str, Any]]] = None,
) -> int:
    """Apply a performance snapshot (mute/volume/pan/sends/device params).

    Processes, in order: track_states, return_states, bus_states and
    master_state. Track/return volumes pass through _linear_to_live_slider;
    the master volume is sent raw — NOTE(review): inconsistent with
    _apply_master_state, confirm intent. Device parameters resolve their
    device_index either directly from the state or via the provided
    name-keyed lookups. Every command is best-effort: failures are swallowed
    and simply not counted.

    Args:
        ableton: active Ableton connection.
        snapshot: snapshot dict; non-dict input is a no-op.
        return_mapping: send name (lowercase) -> return-track index.
        *_device_lookup: optional pre-built name -> device-index lookups.
        bus_mapping: bus_key -> {"track_index": ...} from _ensure_mix_bus_tracks.

    Returns:
        Count of changes successfully applied.
    """
    if not isinstance(snapshot, dict):
        return 0

    applied = 0
    # --- Regular tracks -------------------------------------------------
    for track_state in snapshot.get("track_states", []) or []:
        if not isinstance(track_state, dict):
            continue
        track_index = track_state.get("track_index", None)
        if track_index is None:
            continue

        if "mute" in track_state:
            try:
                ableton.send_command("set_track_mute", {
                    "track_index": track_index,
                    "mute": bool(track_state.get("mute", False)),
                })
                applied += 1
            except Exception:
                pass

        if "volume" in track_state:
            try:
                calibrated_volume = float(track_state.get("volume", 0.72))
                ableton.send_command("set_track_volume", {
                    "track_index": track_index,
                    "volume": _linear_to_live_slider(calibrated_volume),
                })
                logger.debug("Track %d calibrated volume: %.3f", track_index, calibrated_volume)
                applied += 1
            except Exception:
                pass

        if "pan" in track_state:
            try:
                ableton.send_command("set_track_pan", {
                    "track_index": track_index,
                    "pan": float(track_state.get("pan", 0.0)),
                })
                applied += 1
            except Exception:
                pass

        # Sends whose name is not in return_mapping are silently skipped.
        for send_name, send_value in dict(track_state.get("sends", {})).items():
            send_index = return_mapping.get(str(send_name).lower(), None)
            if send_index is None:
                continue
            try:
                ableton.send_command("set_track_send", {
                    "track_index": track_index,
                    "send_index": send_index,
                    "value": float(send_value),
                })
                applied += 1
            except Exception:
                pass

        # Apply device parameters for regular tracks
        devices_for_track = dict((track_device_lookup or {}).get(int(track_index), {}))
        for device_state in _iter_device_parameter_states(track_state.get("device_parameters", [])):
            if not isinstance(device_state, dict):
                continue
            parameter_name = str(device_state.get("parameter", "") or "").strip()
            if not parameter_name:
                continue

            device_index = device_state.get("device_index", None)
            if device_index is None:
                # Resolve by normalized device name; first match wins.
                normalized_name = _normalize_device_key(device_state.get("device_name", ""))
                candidates = devices_for_track.get(normalized_name, [])
                if candidates:
                    device_index = candidates[0]
            if device_index is None:
                continue

            try:
                ableton.send_command("set_device_parameter", {
                    "track_index": int(track_index),
                    "device_index": int(device_index),
                    "parameter": parameter_name,
                    "value": float(device_state.get("value", 0.0)),
                })
                applied += 1
            except Exception:
                pass

    # --- Return tracks --------------------------------------------------
    for return_state in snapshot.get("return_states", []) or []:
        if not isinstance(return_state, dict):
            continue

        return_index = return_state.get("return_index", None)
        if return_index is None:
            # Fall back to resolving the return by its send key.
            send_key = str(return_state.get("send_key", "")).strip().lower()
            return_index = return_mapping.get(send_key, None)
        if return_index is None:
            continue
        return_index = int(return_index)

        if "mute" in return_state:
            try:
                ableton.send_command("set_track_mute", {
                    "track_type": "return",
                    "track_index": return_index,
                    "mute": bool(return_state.get("mute", False)),
                })
                applied += 1
            except Exception:
                pass

        if "volume" in return_state:
            try:
                ableton.send_command("set_track_volume", {
                    "track_type": "return",
                    "track_index": return_index,
                    "volume": _linear_to_live_slider(float(return_state.get("volume", 0.72))),
                })
                applied += 1
            except Exception:
                pass

        if "pan" in return_state:
            try:
                ableton.send_command("set_track_pan", {
                    "track_type": "return",
                    "track_index": return_index,
                    "pan": float(return_state.get("pan", 0.0)),
                })
                applied += 1
            except Exception:
                pass

        devices_for_return = dict((return_device_lookup or {}).get(return_index, {}))
        for device_state in _iter_device_parameter_states(return_state.get("device_parameters", [])):
            if not isinstance(device_state, dict):
                continue
            parameter_name = str(device_state.get("parameter", "") or "").strip()
            if not parameter_name:
                continue

            device_index = device_state.get("device_index", None)
            if device_index is None:
                normalized_name = _normalize_device_key(device_state.get("device_name", ""))
                candidates = devices_for_return.get(normalized_name, [])
                if candidates:
                    device_index = candidates[0]
            if device_index is None:
                continue

            try:
                ableton.send_command("set_device_parameter", {
                    "track_type": "return",
                    "track_index": return_index,
                    "device_index": int(device_index),
                    "parameter": parameter_name,
                    "value": float(device_state.get("value", 0.0)),
                })
                applied += 1
            except Exception:
                pass

    # Apply bus states
    bus_states = snapshot.get("bus_states", [])
    if bus_states and bus_mapping:
        # Resolve bus keys to their Live track indices once up front.
        bus_key_to_index: Dict[str, int] = {}
        for bus_key, bus_info in (bus_mapping or {}).items():
            bus_key_to_index[str(bus_key).lower()] = int(bus_info.get("track_index", -1))
        for bus_state in bus_states:
            if not isinstance(bus_state, dict):
                continue
            bus_key = str(bus_state.get("bus_key", "")).lower()
            if not bus_key:
                continue
            bus_track_index = bus_key_to_index.get(bus_key, None)
            if bus_track_index is None or bus_track_index < 0:
                continue
            devices_for_bus = dict((bus_device_lookup or {}).get(bus_track_index, {}))
            for device_state in _iter_device_parameter_states(bus_state.get("device_parameters", [])):
                if not isinstance(device_state, dict):
                    continue
                parameter_name = str(device_state.get("parameter", "") or "").strip()
                if not parameter_name:
                    continue
                device_index = device_state.get("device_index", None)
                if device_index is None:
                    normalized_name = _normalize_device_key(device_state.get("device_name", ""))
                    candidates = devices_for_bus.get(normalized_name, [])
                    if candidates:
                        device_index = candidates[0]
                if device_index is None:
                    continue
                try:
                    ableton.send_command("set_device_parameter", {
                        "track_index": int(bus_track_index),
                        "device_index": int(device_index),
                        "parameter": parameter_name,
                        "value": float(device_state.get("value", 0.0)),
                    })
                    applied += 1
                except Exception:
                    pass

    # Apply master state
    master_state = snapshot.get("master_state", {})
    if isinstance(master_state, dict) and master_state:
        # Apply master volume if specified
        if "volume" in master_state:
            try:
                ableton.send_command("set_track_volume", {
                    "track_type": "master",
                    "track_index": 0,
                    "volume": float(master_state["volume"]),
                })
                applied += 1
            except Exception:
                pass

        # Apply master device parameters
        for device_state in _iter_device_parameter_states(master_state.get("device_parameters", [])):
            if not isinstance(device_state, dict):
                continue
            parameter_name = str(device_state.get("parameter", "") or "").strip()
            if not parameter_name:
                continue

            device_index = device_state.get("device_index", None)
            if device_index is None:
                normalized_name = _normalize_device_key(device_state.get("device_name", ""))
                candidates = dict(master_device_lookup or {}).get(normalized_name, [])
                if candidates:
                    device_index = candidates[0]
            if device_index is None:
                continue

            try:
                ableton.send_command("set_device_parameter", {
                    "track_type": "master",
                    "track_index": 0,
                    "device_index": int(device_index),
                    "parameter": parameter_name,
                    "value": float(device_state.get("value", 0.0)),
                })
                applied += 1
            except Exception:
                pass

    return applied
|
|
|
|
|
|
def _resolve_arrangement_locators(config: Dict[str, Any]) -> List[Dict[str, Any]]:
|
|
locators = config.get("locators", []) or []
|
|
if isinstance(locators, list) and locators:
|
|
return [item for item in locators if isinstance(item, dict)]
|
|
|
|
resolved: List[Dict[str, Any]] = []
|
|
arrangement_time = 0.0
|
|
for index, section in enumerate(config.get("sections", []) or []):
|
|
if not isinstance(section, dict):
|
|
continue
|
|
beats = float(section.get("beats", 0.0) or (float(section.get("bars", 8)) * 4.0))
|
|
resolved.append({
|
|
"scene_index": int(section.get("index", index)),
|
|
"name": str(section.get("name", "SECTION")),
|
|
"bars": int(section.get("bars", max(1, int(beats / 4.0) if beats else 8))),
|
|
"color": int(section.get("color", 62)),
|
|
"time_beats": arrangement_time,
|
|
})
|
|
arrangement_time += max(1.0, beats)
|
|
return resolved
|
|
|
|
|
|
def _prepare_arrangement_guide_scene_track(ableton: "AbletonConnection", config: Dict[str, Any]) -> str:
    """Create a muted MIDI guide track whose clips label each arrangement section.

    Returns a short status string, or "" when the blueprint yields no locators.
    Raises RuntimeError when the guide track cannot be created or resolved.
    """
    section_locators = _resolve_arrangement_locators(config)
    if not section_locators:
        return ""

    create_response = ableton.send_command("create_midi_track", {"index": -1})
    if _is_error_response(create_response):
        raise RuntimeError(create_response.get("message", "No se pudo crear ARRANGEMENT GUIDE"))

    guide_index = create_response.get("result", {}).get("index")
    if guide_index is None:
        # The create response omitted the index; assume the new track is last.
        session_response = ableton.send_command("get_session_info")
        if _is_error_response(session_response):
            raise RuntimeError("No se pudo resolver el indice de ARRANGEMENT GUIDE")
        guide_index = max(0, int(session_response.get("result", {}).get("num_tracks", 1)) - 1)

    # Label, color, silence and mute the guide track so it never sounds.
    ableton.send_command("set_track_name", {"track_index": guide_index, "name": "ARRANGEMENT GUIDE"})
    ableton.send_command("set_track_color", {"track_index": guide_index, "color": 62})
    ableton.send_command("set_track_volume", {"track_index": guide_index, "volume": 0.0})
    ableton.send_command("set_track_mute", {"track_index": guide_index, "mute": True})

    created_clips = 0
    for section_locator in section_locators:
        slot = int(section_locator.get("scene_index", created_clips))
        section_bars = int(section_locator.get("bars", 8) or 8)
        clip_response = ableton.send_command("create_clip", {
            "track_index": guide_index,
            "clip_index": slot,
            "length": max(1.0, section_bars * 4.0),
            "name": "{} [{} bars]".format(section_locator.get("name", "SECTION"), section_bars),
        })
        if _is_error_response(clip_response):
            continue
        ableton.send_command("set_clip_color", {
            "track_index": guide_index,
            "clip_index": slot,
            "color": int(section_locator.get("color", 62)),
        })
        # One near-silent note so the clip is non-empty and launchable.
        ableton.send_command("add_notes", {
            "track_index": guide_index,
            "clip_index": slot,
            "notes": [{"pitch": 24, "start": 0.0, "duration": 0.05, "velocity": 1}],
        })
        created_clips += 1

    return "Guide track listo: {} clips de sección".format(created_clips)
|
|
|
|
|
|
def apply_arrangement_markers(ableton: "AbletonConnection", config: Dict[str, Any]) -> str:
    """Drop a cue point at every section boundary, then rewind to the start.

    Returns a summary string, or "" when the blueprint yields no locators.
    """
    section_locators = _resolve_arrangement_locators(config)
    if not section_locators:
        return ""

    created_cues = 0
    for section_locator in section_locators:
        cue_time = float(section_locator.get("time_beats", 0.0) or 0.0)
        if not _is_error_response(ableton.send_command("create_cue_point", {"time": cue_time})):
            created_cues += 1

    # Leave the playhead at the top with the Arrangement visible.
    ableton.send_command("jump_to", {"time": 0})
    ableton.send_command("show_arrangement_view")

    return "Markers de Arrangement: {} locators".format(created_cues)
|
|
|
|
def commit_session_blueprint_to_arrangement(ableton: "AbletonConnection", config: Dict[str, Any]) -> str:
    """Record Session scenes into the Arrangement when the API lacks create_midi_clip.

    Plays the blueprint's scenes in real time with Arrangement record enabled,
    applying per-scene performance snapshots (mixer + device parameters) just
    before each scene boundary. Returns a human-readable summary string.
    Raises RuntimeError when the blueprint has no sections.
    NOTE(review): this blocks for the full musical duration (total_beats at bpm).
    """
    sections = config.get("sections", []) or []
    performance = config.get("performance", []) or []
    # Map scene index -> performance snapshot dict (non-dicts ignored).
    performance_by_scene = {
        int(item.get("scene_index", index)): item
        for index, item in enumerate(performance)
        if isinstance(item, dict)
    }
    return_mapping = _build_return_send_mapping(config)
    return_device_lookup = _build_return_device_lookup(ableton, config)

    # Build track device lookup for device parameters on regular tracks
    track_indices = []
    for track in config.get("tracks", []) or []:
        if isinstance(track, dict) and "index" in track:
            track_indices.append(int(track["index"]))
    track_device_lookup = _build_track_device_lookup(ableton, track_indices) if track_indices else {}

    # Build master device lookup for device parameters on master track
    master_device_lookup: Dict[str, List[int]] = {}
    try:
        response = ableton.send_command("get_devices", {"track_type": "master", "track_index": 0})
        for device in _extract_devices_payload(response):
            normalized_name = _normalize_device_key(device.get("name", ""))
            if normalized_name:
                master_device_lookup.setdefault(normalized_name, []).append(int(device.get("index", 0)))
    except Exception:
        # Best-effort: commit still proceeds without master device automation.
        pass

    # Build bus device lookup for device parameters on bus tracks
    bus_mapping = _ensure_mix_bus_tracks(ableton, config)
    bus_device_lookup = _build_bus_device_lookup(ableton, bus_mapping) if bus_mapping else {}

    bpm = float(config.get("bpm", 120) or 120)
    if not sections:
        raise RuntimeError("El blueprint no incluye sections para el commit a Arrangement")

    # Total musical length in beats; each section counts at least one beat.
    total_beats = 0.0
    for section in sections:
        beats = section.get("beats", None)
        if beats is None:
            beats = float(section.get("bars", 8)) * 4.0
        total_beats += max(1.0, float(beats))

    guide_result = _prepare_arrangement_guide_scene_track(ableton, config)

    try:
        ableton.send_command("stop")
    except Exception:
        pass

    # Arm Arrangement recording from beat 0 with looping disabled.
    ableton.send_command("show_arrangement_view")
    ableton.send_command("loop_selection", {"start": 0, "length": total_beats, "enable": False})
    ableton.send_command("jump_to", {"time": 0})
    ableton.send_command("set_record_mode", {"enabled": True})
    # Apply the first scene's snapshot before playback starts.
    snapshot_changes = _apply_performance_snapshot(
        ableton,
        performance_by_scene.get(0, {}),
        return_mapping,
        return_device_lookup,
        track_device_lookup,
        bus_device_lookup,
        master_device_lookup,
        bus_mapping,
    )
    ableton.send_command("fire_scene", {"scene_index": 0})
    time.sleep(0.15)
    ableton.send_command("start_playback")

    start_time = time.monotonic()
    elapsed_beats = 0.0
    # Walk the remaining scenes, firing each one slightly before its boundary
    # so Live quantizes the launch onto the bar.
    for next_scene_index, section in enumerate(sections[1:], start=1):
        previous = sections[next_scene_index - 1]
        previous_beats = previous.get("beats", None)
        if previous_beats is None:
            previous_beats = float(previous.get("bars", 8)) * 4.0
        elapsed_beats += max(1.0, float(previous_beats))
        # 0.25s early margin for the fire; snapshot goes in 0.12s before that.
        boundary_time = start_time + (elapsed_beats * 60.0 / bpm) - 0.25
        _sleep_until(boundary_time - 0.12)
        snapshot_changes += _apply_performance_snapshot(
            ableton,
            performance_by_scene.get(next_scene_index, {}),
            return_mapping,
            return_device_lookup,
            track_device_lookup,
            bus_device_lookup,
            master_device_lookup,
            bus_mapping,
        )
        _sleep_until(boundary_time)
        ableton.send_command("fire_scene", {"scene_index": next_scene_index})

    # Let the last scene ring out a little past its nominal end before stopping.
    finish_time = start_time + (total_beats * 60.0 / bpm) + 0.35
    _sleep_until(finish_time)
    ableton.send_command("stop")
    ableton.send_command("set_record_mode", {"enabled": False})
    ableton.send_command("jump_to", {"time": 0})
    ableton.send_command("show_arrangement_view")

    commit_result = "Commit a Arrangement completado: {} scenes, {:.1f}s, {} snapshots".format(
        len(sections),
        total_beats * 60.0 / bpm,
        len(performance_by_scene) if performance_by_scene else snapshot_changes,
    )
    if guide_result:
        commit_result = "{} | {}".format(commit_result, guide_result)
    return commit_result
|
|
|
|
# Producer instructions (AI context). Passed to FastMCP as the model-facing
# system prompt; the Spanish text below is runtime content and stays as-is.
PRODUCER_INSTRUCTIONS = """
Eres AbletonMCP-AI, un productor musical experto integrado con Ableton Live 12.
Tu objetivo es crear música electrónica profesional mediante prompts en lenguaje natural.

CAPACIDADES PRINCIPALES:
1. Generar tracks completos con estructura profesional (Intro, Build, Drop, Break, Outro)
2. Crear patrones MIDI para diferentes géneros (Techno, House, Trance, Tech-House, etc.)
3. Seleccionar y cargar samples apropiados para cada elemento (kick, clap, hat, bass, synth)
4. Configurar BPM, tonalidad y estructura musical
5. Aplicar procesamiento de señal básico (volumen, panorama, mute/solo)

ESTILOS SOPORTADOS:
- Techno: Industrial, Peak Time, Dub, Minimal
- House: Deep, Tech-House, Progressive, Afro, Classic 90s
- Trance: Psy, Progressive, Uplifting
- Otros: Drum & Bass, Garage, EBM

FLUJO DE TRABAJO:
1. Analizar el prompt del usuario para extraer género, BPM, tonalidad, mood
2. Seleccionar samples apropiados del índice
3. Generar patrones MIDI característicos del género
4. Crear estructura de tracks en Ableton
5. Configurar mezcla básica (niveles, paneo)
6. Proporcionar feedback sobre lo creado

REGLAS:
- Siempre verifica la conexión con Ableton antes de ejecutar comandos
- Usa valores por defecto razonables si el usuario no especifica
- Organiza los tracks con colores consistentes (Drums=Rojo, Bass=Azul, Synths=Amarillo, etc.)
- Crea clips nombrados apropiadamente ("Kick Loop", "Bassline", "Chord Stab")
- Mantén headroom en la mezcla (master sin clip)
""".strip()
|
|
|
|
|
|
def _normalize_command_payload(command_type: str, params: Optional[Dict[str, Any]]) -> Tuple[str, Dict[str, Any]]:
    """Normalize MCP-level aliases to the Remote Script protocol."""
    resolved = command_type
    payload = dict(params or {})

    # Track-creation aliases collapse to "create_track" with an explicit type.
    track_kind = {"create_midi_track": "midi", "create_audio_track": "audio"}.get(resolved)
    if track_kind is not None:
        resolved = "create_track"
        payload.setdefault("type", track_kind)
    else:
        # Straight renames with no parameter changes.
        resolved = {
            "add_notes_to_clip": "add_notes",
            "start_playback": "play",
            "stop_playback": "stop",
            "generate_track": "generate_complete_song",
        }.get(resolved, resolved)

    # Mirror index aliases expected by the Remote Script, never overwriting
    # values the caller already supplied.
    if resolved in TRACK_INDEX_COMMANDS and "track_index" in payload:
        payload.setdefault("index", payload["track_index"])

    if resolved in CLIP_SCENE_COMMANDS and "clip_index" in payload:
        payload.setdefault("scene_index", payload["clip_index"])

    if resolved in SCENE_INDEX_COMMANDS and "scene_index" in payload:
        payload.setdefault("index", payload["scene_index"])

    return resolved, payload
|
|
|
|
|
|
def _is_error_response(response: Dict[str, Any]) -> bool:
|
|
return response.get("status") != "success"
|
|
|
|
|
|
@dataclass
class AbletonConnection:
    """Manages the TCP connection to the Ableton Live Remote Script.

    Note: send_command opens a fresh short-lived socket per command; the
    persistent ``sock`` kept by connect() is torn down before each send.
    """
    host: str = HOST
    port: int = DEFAULT_PORT
    sock: Optional[socket.socket] = None  # persistent socket from connect(), if any
    _connection_timeout: float = 5.0      # per-attempt connect timeout (seconds)
    _max_retries: int = 3                 # connect() attempts before giving up
    _retry_delay: float = 0.5             # pause between connect() attempts

    def connect(self) -> bool:
        """Connect to the Ableton Remote Script; returns True on success."""
        if self.sock:
            return True

        last_error = None
        for attempt in range(self._max_retries):
            try:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.sock.settimeout(self._connection_timeout)
                self.sock.connect((self.host, self.port))
                logger.info(f"Conectado a Ableton en {self.host}:{self.port}")
                return True
            except socket.timeout as e:
                last_error = e
                logger.warning(f"Timeout conectando a Ableton (intento {attempt + 1}/{self._max_retries})")
            except ConnectionRefusedError as e:
                last_error = e
                logger.warning(f"Conexion rechazada por Ableton (intento {attempt + 1}/{self._max_retries})")
            except OSError as e:
                last_error = e
                logger.warning(f"Error de OS conectando a Ableton: {e} (intento {attempt + 1}/{self._max_retries})")
            except Exception as e:
                last_error = e
                logger.error(f"Error inesperado conectando a Ableton: {e}")

            # Failed attempt: drop the half-open socket and maybe retry.
            self.sock = None
            if attempt < self._max_retries - 1:
                time.sleep(self._retry_delay)

        logger.error(f"Error conectando a Ableton despues de {self._max_retries} intentos: {last_error}")
        return False

    def disconnect(self) -> None:
        """Disconnect from Ableton; errors are logged and swallowed."""
        if self.sock:
            try:
                self.sock.shutdown(socket.SHUT_RDWR)
            except OSError:
                # Already closed/reset peers are expected here.
                pass
            except Exception as e:
                logger.debug(f"Error en shutdown de socket: {e}")
            try:
                self.sock.close()
            except Exception as e:
                logger.debug(f"Error cerrando socket: {e}")
            finally:
                self.sock = None

    def _validate_command_params(self, command_type: str, params: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        """Validate and normalize command parameters."""
        if params is None:
            return {}

        if not isinstance(params, dict):
            raise ValidationError("params", params, "dictionary")

        return params

    def send_command(self, command_type: str, params: Optional[Dict[str, Any]] = None, timeout: float = 15.0) -> Dict[str, Any]:
        """Send one command to Ableton over a fresh socket and return its JSON reply.

        Raises ValidationError for a bad command_type, TimeoutError when the
        reply does not arrive within the resolved timeout, and ConnectionError
        for any other communication failure.
        """
        try:
            _validate_string(command_type, "command_type", allow_empty=False)
        except ValidationError:
            raise ValidationError("command_type", command_type, "non-empty string")

        # A persistent socket from connect() is not reused; drop it first.
        if self.sock:
            self.disconnect()

        normalized_type, normalized_params = _normalize_command_payload(command_type, params)
        # Per-command timeout overrides win over the caller's value when larger.
        resolved_timeout = max(float(timeout or 0.0), COMMAND_TIMEOUTS.get(normalized_type, 15.0))

        command = {
            "type": normalized_type,
            "params": normalized_params
        }

        operation_id = f"{normalized_type}_{int(time.time() * 1000)}"
        start_time = time.monotonic()

        try:
            if normalized_type != command_type:
                logger.info(f"Enviando comando: {command_type} -> {normalized_type}")
            else:
                logger.info(f"Enviando comando: {command_type}")

            # Wire format: compact JSON followed by MESSAGE_TERMINATOR.
            payload = json.dumps(command, separators=(',', ':')).encode('utf-8') + MESSAGE_TERMINATOR

            sock = None
            try:
                sock = socket.create_connection((self.host, self.port), timeout=resolved_timeout)
                sock.settimeout(resolved_timeout)
                sock.sendall(payload)

                buffer = b""
                chunks_received = 0
                max_chunks = 1000  # Prevent infinite loops

                # Accumulate chunks until a terminator-delimited JSON reply parses.
                while chunks_received < max_chunks:
                    try:
                        chunk = sock.recv(8192)
                        if not chunk:
                            logger.warning(f"Conexion cerrada por Ableton despues de {chunks_received} chunks")
                            break

                        chunks_received += 1
                        buffer += chunk

                        if MESSAGE_TERMINATOR not in buffer:
                            continue

                        raw_response, _, remainder = buffer.partition(MESSAGE_TERMINATOR)
                        buffer = remainder

                        try:
                            response = json.loads(raw_response.decode('utf-8'))
                            elapsed = time.monotonic() - start_time
                            logger.debug(f"Comando {normalized_type} completado en {elapsed:.3f}s")
                            return response
                        except json.JSONDecodeError as e:
                            # Malformed frame: keep reading subsequent frames.
                            logger.warning(f"Respuesta JSON invalida: {e}")
                            continue

                    except socket.timeout:
                        elapsed = time.monotonic() - start_time
                        logger.warning(f"Timeout esperando respuesta despues de {elapsed:.1f}s")
                        raise TimeoutError(normalized_type, resolved_timeout, {
                            "operation_id": operation_id,
                            "elapsed_seconds": elapsed
                        })

                # If we get here the reply may be incomplete (no terminator seen).
                if buffer:
                    try:
                        response = json.loads(buffer.decode('utf-8').strip())
                        logger.warning("Respuesta JSON recibida sin terminador")
                        return response
                    except json.JSONDecodeError as e:
                        raise ConnectionError(f"Respuesta JSON incompleta: {e}")

                raise ConnectionError("No se recibio respuesta de Ableton")

            finally:
                if sock:
                    try:
                        sock.close()
                    except Exception:
                        pass

        except MCPError:
            raise
        except socket.timeout:
            elapsed = time.monotonic() - start_time
            raise TimeoutError(normalized_type, resolved_timeout, {
                "operation_id": operation_id,
                "elapsed_seconds": elapsed
            })
        except ConnectionRefusedError:
            raise ConnectionError(f"Ableton no esta aceptando conexiones en {self.host}:{self.port}")
        except Exception as e:
            _log_error(e, context=f"send_command({normalized_type})")
            raise ConnectionError(f"Error de comunicacion con Ableton: {e}")
|
|
|
|
|
|
# Process-wide singletons, created lazily by their get_* accessors below.
_ableton_connection: Optional[AbletonConnection] = None    # shared Ableton link
_sample_index: Optional['SampleIndex'] = None              # legacy sample index
_song_generator: Optional['SongGenerator'] = None          # song/pattern generator
_sample_manager: Optional['SampleManager'] = None          # sample library manager
_sample_selector: Optional['SampleSelector'] = None        # sample picker over the manager
_reference_listener: Optional['ReferenceAudioListener'] = None  # audio reference analyzer
_audio_resampler: Optional['AudioResampler'] = None        # audio-derived transition generator
|
|
|
|
|
|
def get_ableton_connection() -> AbletonConnection:
    """Return the process-wide Ableton connection, creating it on first use."""
    global _ableton_connection
    if _ableton_connection is not None:
        return _ableton_connection
    _ableton_connection = AbletonConnection()
    return _ableton_connection
|
|
|
|
|
|
def _ensure_ableton_connection() -> AbletonConnection:
    """Ensure Ableton connection is available, raise ConnectionError if not."""
    connection = get_ableton_connection()
    if connection is not None:
        return connection
    raise ConnectionError("Ableton connection not initialized")
|
|
|
|
|
|
def get_sample_index() -> 'SampleIndex':
    """Return the lazily-created legacy sample index.

    Raises DependencyError when the SampleIndex class is unavailable or
    its construction fails.
    """
    global _sample_index
    if SampleIndex is None:
        raise DependencyError("SampleIndex")
    if _sample_index is None:
        try:
            _sample_index = SampleIndex(SAMPLES_DIR)
        except Exception as exc:
            _log_error(exc, context="get_sample_index")
            raise DependencyError("SampleIndex", {"original_error": str(exc)})
    return _sample_index
|
|
|
|
|
|
def get_sample_manager() -> Optional['SampleManager']:
    """Return the shared SampleManager, or None when the sample system is unavailable."""
    global _sample_manager
    creatable = SAMPLE_SYSTEM_AVAILABLE and sample_manager_factory is not None
    if _sample_manager is None and creatable:
        try:
            _sample_manager = sample_manager_factory(SAMPLES_DIR)
        except Exception as exc:
            _log_error(exc, context="get_sample_manager")
            return None
    return _sample_manager
|
|
|
|
|
|
def _ensure_sample_manager() -> 'SampleManager':
    """Ensure SampleManager is available, raise DependencyError if not."""
    resolved = get_sample_manager()
    if resolved is not None:
        return resolved
    raise DependencyError("SampleManager")
|
|
|
|
|
|
def get_sample_selector() -> Optional['SampleSelector']:
    """Return the shared SampleSelector, building it from the SampleManager on demand."""
    global _sample_selector
    creatable = SAMPLE_SYSTEM_AVAILABLE and SampleSelector is not None
    if _sample_selector is None and creatable:
        try:
            backing_manager = get_sample_manager()
            if backing_manager:
                _sample_selector = SampleSelector(backing_manager)
        except Exception as exc:
            _log_error(exc, context="get_sample_selector")
            return None
    return _sample_selector
|
|
|
|
|
|
def _ensure_sample_selector() -> 'SampleSelector':
    """Ensure SampleSelector is available, raise DependencyError if not."""
    resolved = get_sample_selector()
    if resolved is not None:
        return resolved
    raise DependencyError("SampleSelector")
|
|
|
|
|
|
def get_song_generator() -> 'SongGenerator':
    """Return the lazily-created SongGenerator.

    Raises DependencyError when the SongGenerator class is unavailable or
    its construction fails.
    """
    global _song_generator
    if SongGenerator is None:
        raise DependencyError("SongGenerator")
    if _song_generator is None:
        try:
            _song_generator = SongGenerator()
        except Exception as exc:
            _log_error(exc, context="get_song_generator")
            raise DependencyError("SongGenerator", {"original_error": str(exc)})
    return _song_generator
|
|
|
|
|
|
def _ensure_song_generator() -> 'SongGenerator':
    """Ensure SongGenerator is available, raise DependencyError if not."""
    if SongGenerator is not None:
        return get_song_generator()
    raise DependencyError("SongGenerator")
|
|
|
|
|
|
def get_reference_listener() -> Optional['ReferenceAudioListener']:
    """Return the audio-based reference analyzer, or None when unavailable."""
    global _reference_listener
    if _reference_listener is None and ReferenceAudioListener is not None:
        try:
            _reference_listener = ReferenceAudioListener(SAMPLES_DIR)
        except Exception as exc:
            _log_error(exc, context="get_reference_listener")
            return None
    return _reference_listener
|
|
|
|
|
|
def get_audio_resampler() -> Optional['AudioResampler']:
    """Return the audio-derived transition generator, or None when unavailable."""
    global _audio_resampler
    if _audio_resampler is None and AudioResampler is not None:
        try:
            _audio_resampler = AudioResampler()
        except Exception as exc:
            _log_error(exc, context="get_audio_resampler")
            return None
    return _audio_resampler
|
|
|
|
|
|
def _send_ableton_command_safe(ableton: AbletonConnection, command: str, params: Optional[Dict[str, Any]] = None, timeout: float = 15.0) -> Dict[str, Any]:
    """Send a command to Ableton with proper error handling.

    Fix: the ``params`` annotation was ``Dict[str, Any] = None`` (implicit
    Optional, rejected by strict type checkers); it is now explicit.

    Raises AbletonResponseError when Ableton reports a non-success status,
    re-raises MCPError subclasses unchanged, and wraps any other failure in
    ConnectionError.
    """
    try:
        response = ableton.send_command(command, params, timeout=timeout)
        if _is_error_response(response):
            raise AbletonResponseError(command, response)
        return response
    except MCPError:
        # Domain errors already carry context; propagate as-is.
        raise
    except Exception as e:
        _log_error(e, context=f"_send_ableton_command_safe({command})")
        raise ConnectionError(f"Failed to send command '{command}': {e}")
|
|
|
|
|
|
@asynccontextmanager
async def server_lifespan(server: FastMCP) -> AsyncIterator[Dict[str, Any]]:
    """Manage server startup/shutdown: load persisted state, connect to
    Ableton, warm up the sample subsystems, and save state on exit.

    Every initialization step is best-effort: failures are logged as warnings
    and the server still starts.
    """
    try:
        logger.info("AbletonMCP-AI Server iniciando...")

        # T014: load persisted sample history
        _load_sample_history()

        # T029: load the Coverage Wheel
        _load_coverage_wheel()

        # T021: load the sample-fatigue system
        _load_sample_fatigue()

        # Try to connect to Ableton (non-fatal if the Remote Script is down).
        try:
            ableton = get_ableton_connection()
            if ableton.connect():
                logger.info("✓ Conectado a Ableton Live")
            else:
                logger.warning("⚠ No se pudo conectar a Ableton (¿está abierto el script?)")
        except Exception as e:
            logger.warning(f"⚠ Error conectando a Ableton: {e}")

        # Initialize the legacy sample index.
        try:
            sample_index = get_sample_index()
            logger.info(f"✓ Índice de samples cargado: {len(sample_index.samples)} samples")
        except Exception as e:
            logger.warning(f"⚠ Error cargando índice de samples: {e}")

        # Initialize the new sample system.
        try:
            sample_manager = get_sample_manager()
            if sample_manager:
                logger.info("✓ Sistema de samples inicializado")
                # Scan the library only when the manager starts empty.
                if len(sample_manager.samples) == 0:
                    logger.info("Escaneando librería de samples...")
                    stats = sample_manager.scan_directory()
                    logger.info(f" → {stats['added']} samples agregados")
        except Exception as e:
            logger.warning(f"⚠ Error inicializando sistema de samples: {e}")

        try:
            installed_device = ensure_m4l_sampler_device_installed()
            logger.info(f"✓ Device M4L instalado: {installed_device}")
        except Exception as e:
            logger.warning(f"⚠ Error instalando device M4L: {e}")

        yield {}

    finally:
        # Shutdown: close the connection and persist all tracking state.
        global _ableton_connection
        if _ableton_connection:
            logger.info("Desconectando de Ableton...")
            _ableton_connection.disconnect()
            _ableton_connection = None

        # T014: save sample history on shutdown
        _save_sample_history()

        # T029: save the Coverage Wheel on shutdown
        _save_coverage_wheel()

        # T021: save sample fatigue on shutdown
        _save_sample_fatigue()

        logger.info("AbletonMCP-AI Server detenido")
|
|
|
|
|
|
# Create the MCP server; PRODUCER_INSTRUCTIONS is exposed as the model-facing
# context and server_lifespan handles startup/shutdown of all subsystems.
mcp = FastMCP(
    "AbletonMCP-AI",
    instructions=PRODUCER_INSTRUCTIONS,
    lifespan=server_lifespan
)
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Información
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def get_session_info(ctx: Context) -> str:
    """Return current Ableton session info as pretty-printed JSON."""
    try:
        response = get_ableton_connection().send_command("get_session_info")

        if response.get("status") != "success":
            return f"Error: {response.get('message', 'Unknown error')}"
        return json.dumps(response["result"], indent=2)

    except Exception as e:
        return f"Error obteniendo información: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def get_tracks(ctx: Context) -> str:
    """List every track in the current session as JSON."""
    try:
        response = get_ableton_connection().send_command("get_tracks")

        if response.get("status") != "success":
            return _handle_tool_error(
                AbletonResponseError("get_tracks", response),
                "get_tracks"
            )
        return json.dumps(response["result"], indent=2)

    except MCPError as e:
        return _handle_tool_error(e, "get_tracks")
    except Exception as e:
        return _handle_tool_error(e, "get_tracks")
|
|
|
|
|
|
@mcp.tool()
def get_track_info(ctx: Context, track_index: int) -> str:
    """Return detailed info for one track (including clips and devices) as JSON."""
    try:
        track_index = _validate_int(track_index, "track_index", min_val=0)

        connection = get_ableton_connection()
        tracks_response = connection.send_command("get_tracks")

        if _is_error_response(tracks_response):
            return _handle_tool_error(
                AbletonResponseError("get_tracks", tracks_response),
                "get_track_info"
            )

        all_tracks = tracks_response.get("result", [])
        if track_index >= len(all_tracks):
            return _handle_tool_error(
                ValidationError("track_index", track_index, f"index < {len(all_tracks)} (number of tracks)"),
                "get_track_info"
            )

        details = dict(all_tracks[track_index])

        # Enrich with clips and devices when those queries succeed; a failed
        # sub-query just leaves that key out of the result.
        clips_response = connection.send_command("get_clips", {"track_index": track_index})
        if not _is_error_response(clips_response):
            details["clips"] = clips_response.get("result", [])

        devices_response = connection.send_command("get_devices", {"track_index": track_index})
        if not _is_error_response(devices_response):
            details["devices"] = devices_response.get("result", [])

        return json.dumps(details, indent=2)

    except MCPError as e:
        return _handle_tool_error(e, "get_track_info")
    except Exception as e:
        return _handle_tool_error(e, "get_track_info")
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Creación de Tracks
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def create_midi_track(ctx: Context, index: int = -1, name: str = "MIDI Track") -> str:
    """Create a new MIDI track, optionally renaming it right after creation."""
    try:
        index = _validate_int(index, "index", min_val=-1)
        name = _validate_string(name, "name", allow_empty=True)

        connection = get_ableton_connection()
        response = connection.send_command("create_midi_track", {"index": index})

        if response.get("status") != "success":
            return _handle_tool_error(
                AbletonResponseError("create_midi_track", response),
                "create_midi_track"
            )

        if name:
            # Best-effort rename; a failure here must not undo the creation.
            created_index = response["result"].get("index", index if index >= 0 else 0)
            try:
                connection.send_command("set_track_name", {
                    "track_index": created_index,
                    "name": name
                })
            except Exception as e:
                _log_error(e, context="create_midi_track:set_track_name")
        return f"Track MIDI '{name}' creado exitosamente"

    except MCPError as e:
        return _handle_tool_error(e, "create_midi_track")
    except Exception as e:
        return _handle_tool_error(e, "create_midi_track")
|
|
|
|
|
|
@mcp.tool()
def create_audio_track(ctx: Context, index: int = -1, name: str = "Audio Track") -> str:
    """Create a new audio track, optionally renaming it right after creation."""
    try:
        index = _validate_int(index, "index", min_val=-1)
        name = _validate_string(name, "name", allow_empty=True)

        connection = get_ableton_connection()
        response = connection.send_command("create_audio_track", {"index": index})

        if response.get("status") != "success":
            return _handle_tool_error(
                AbletonResponseError("create_audio_track", response),
                "create_audio_track"
            )

        if name:
            # Best-effort rename; a failure here must not undo the creation.
            created_index = response["result"].get("index", index if index >= 0 else 0)
            try:
                connection.send_command("set_track_name", {
                    "track_index": created_index,
                    "name": name
                })
            except Exception as e:
                _log_error(e, context="create_audio_track:set_track_name")
        return f"Track de audio '{name}' creado exitosamente"

    except MCPError as e:
        return _handle_tool_error(e, "create_audio_track")
    except Exception as e:
        return _handle_tool_error(e, "create_audio_track")
|
|
|
|
|
|
@mcp.tool()
def set_track_name(ctx: Context, track_index: int, name: str) -> str:
    """Rename a track."""
    try:
        track_index = _validate_int(track_index, "track_index", min_val=0)
        name = _validate_string(name, "name", allow_empty=False)

        response = get_ableton_connection().send_command("set_track_name", {
            "track_index": track_index,
            "name": name
        })

        if response.get("status") != "success":
            return _handle_tool_error(
                AbletonResponseError("set_track_name", response),
                "set_track_name"
            )
        return f"Track {track_index} renombrado a '{name}'"

    except MCPError as e:
        return _handle_tool_error(e, "set_track_name")
    except Exception as e:
        return _handle_tool_error(e, "set_track_name")
|
|
|
|
|
|
@mcp.tool()
def set_track_color(ctx: Context, track_index: int, color: int) -> str:
    """
    Change a track's color (0-69).

    Common ranges:
    - 0-9: reds
    - 10-19: oranges/yellows
    - 20-29: greens
    - 30-39: blues
    - 40-49: purples/pinks
    - 50-59: grays
    - 60-69: specials
    """
    try:
        track_index = _validate_int(track_index, "track_index", min_val=0)
        color = _validate_int(color, "color", min_val=0, max_val=69)

        response = get_ableton_connection().send_command("set_track_color", {
            "track_index": track_index,
            "color": color
        })

        if response.get("status") != "success":
            return _handle_tool_error(
                AbletonResponseError("set_track_color", response),
                "set_track_color"
            )
        return f"Color del track {track_index} actualizado"

    except MCPError as e:
        return _handle_tool_error(e, "set_track_color")
    except Exception as e:
        return _handle_tool_error(e, "set_track_color")
|
|
|
|
|
|
@mcp.tool()
def set_track_volume(ctx: Context, track_index: int, volume: float, track_type: str = "track") -> str:
    """
    Set a track's volume (0.0 - 1.0).

    Typical values:
    - 0.0: silence
    - 0.5: -6dB
    - 0.7: -3dB
    - 0.85: 0dB (unity)
    - 1.0: +6dB
    """
    try:
        response = get_ableton_connection().send_command("set_track_volume", {
            "track_index": track_index,
            "track_type": track_type,
            "volume": volume
        })

        if response.get("status") != "success":
            return f"✗ Error: {response.get('message')}"

        db = 20 * (volume - 0.85) / 0.85  # rough dB approximation around unity
        target_label = "return" if str(track_type).lower() == "return" else "track"
        return f"✓ Volumen del {target_label} {track_index} ajustado ({volume:.2f}, ~{db:+.1f}dB)"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def set_track_pan(ctx: Context, track_index: int, pan: float, track_type: str = "track") -> str:
    """
    Adjust a track's stereo panning (-1.0 to 1.0).

    Values:
    - -1.0: hard left
    - 0.0: center
    - 1.0: hard right

    Args:
        track_index: index of the target track
        pan: pan position; clamped to [-1.0, 1.0]
        track_type: "track" for regular tracks, "return" for return tracks
    """
    try:
        # Clamp to the documented range (mirrors the guard in set_track_send)
        # so an out-of-range value never reaches Ableton.
        pan = max(-1.0, min(1.0, pan))

        ableton = get_ableton_connection()
        response = ableton.send_command("set_track_pan", {
            "track_index": track_index,
            "track_type": track_type,
            "pan": pan
        })

        if response.get("status") == "success":
            pos = "centro" if pan == 0 else f"{'izq' if pan < 0 else 'der'} {abs(pan)*100:.0f}%"
            target_label = "return" if str(track_type).lower() == "return" else "track"
            return f"✓ Paneo del {target_label} {track_index}: {pos}"
        else:
            return f"✗ Error: {response.get('message')}"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def set_track_send(ctx: Context, track_index: int, send_index: int, value: float, track_type: str = "track") -> str:
    """
    Set the level of one of a track's sends (0.0 - 1.0).
    """
    try:
        # Clamp before forwarding; the reported level keeps the caller's raw value.
        payload = {
            "track_index": track_index,
            "track_type": track_type,
            "send_index": send_index,
            "value": max(0.0, min(1.0, value))
        }
        reply = get_ableton_connection().send_command("set_track_send", payload)

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        target_label = "return" if str(track_type).lower() == "return" else "track"
        return f"✓ Send {send_index} del {target_label} {track_index} ajustado a {value:.2f}"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def set_track_mute(ctx: Context, track_index: int, mute: bool, track_type: str = "track") -> str:
    """Enable/disable mute on a track."""
    try:
        reply = get_ableton_connection().send_command("set_track_mute", {
            "track_index": track_index,
            "track_type": track_type,
            "mute": mute
        })

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        estado = "muteado" if mute else "desmuteado"
        target_label = "Return" if str(track_type).lower() == "return" else "Track"
        return f"✓ {target_label} {track_index} {estado}"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def set_track_solo(ctx: Context, track_index: int, solo: bool) -> str:
    """Enable/disable solo on a track."""
    try:
        reply = get_ableton_connection().send_command("set_track_solo", {
            "track_index": track_index,
            "solo": solo
        })

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        estado = "en solo" if solo else "sin solo"
        return f"✓ Track {track_index} {estado}"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Clips y Notas
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def create_clip(ctx: Context, track_index: int, clip_index: int, length: float = 4.0, name: str = "") -> str:
    """
    Create a MIDI clip in a specific slot.

    Args:
        track_index: track index
        clip_index: slot/scene index
        length: duration in beats (default 4.0 = one bar)
        name: optional clip name
    """
    try:
        conn = get_ableton_connection()

        reply = conn.send_command("create_clip", {
            "track_index": track_index,
            "clip_index": clip_index,
            "length": length
        })

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"

        # Rename only after the clip exists; the rename result is best-effort.
        if name:
            conn.send_command("set_clip_name", {
                "track_index": track_index,
                "clip_index": clip_index,
                "name": name
            })

        return f"✓ Clip creado en track {track_index}, slot {clip_index} ({length} beats)"

    except Exception as e:
        return f"✗ Error creando clip: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def add_notes_to_clip(
    ctx: Context,
    track_index: int,
    clip_index: int,
    notes: str
) -> str:
    """
    Add MIDI notes to an existing clip.

    Args:
        track_index: track index
        clip_index: clip/slot index
        notes: JSON array of notes [{"pitch": 60, "start": 0.0, "duration": 0.25, "velocity": 100}, ...]

    Common MIDI notes:
    - C1 (36): Kick
    - D1 (38): Snare
    - F#1 (42): Closed Hi-hat
    - A#1 (46): Open Hi-hat
    - D2 (50): Clap
    - C3 (60): middle C
    """
    try:
        notes_list = json.loads(notes)
        # json.loads also accepts objects/numbers/strings; the remote handler
        # expects a list, so reject anything else with a clear message instead
        # of forwarding an invalid payload.
        if not isinstance(notes_list, list):
            return "✗ Error: El parámetro 'notes' debe ser un array JSON de notas"

        ableton = get_ableton_connection()
        response = ableton.send_command("add_notes_to_clip", {
            "track_index": track_index,
            "clip_index": clip_index,
            "notes": notes_list
        })

        if response.get("status") == "success":
            result = response.get("result", {})
            # Different remote versions report the count under different keys;
            # fall back to the submitted count.
            count = result.get("num_notes_added", result.get("notes_added", len(notes_list)))
            return f"✓ {count} notas agregadas al clip"
        else:
            return f"✗ Error: {response.get('message')}"

    except json.JSONDecodeError:
        return "✗ Error: El parámetro 'notes' debe ser un JSON válido"
    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def fire_clip(ctx: Context, track_index: int, clip_index: int) -> str:
    """Fire/launch a specific clip."""
    try:
        reply = get_ableton_connection().send_command("fire_clip", {
            "track_index": track_index,
            "clip_index": clip_index
        })

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        return f"▶ Clip en track {track_index}, slot {clip_index} disparado"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def stop_clip(ctx: Context, track_index: int, clip_index: int) -> str:
    """Stop a specific clip."""
    try:
        reply = get_ableton_connection().send_command("stop_clip", {
            "track_index": track_index,
            "clip_index": clip_index
        })

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        return f"⏹ Clip en track {track_index}, slot {clip_index} detenido"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Transporte y Tempo
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def set_tempo(ctx: Context, tempo: float) -> str:
    """
    Change the session BPM/tempo.

    Typical ranges per genre:
    - Techno: 125-140 BPM
    - House: 120-128 BPM
    - Tech-House: 124-128 BPM
    - Trance: 135-150 BPM
    - Drum & Bass: 160-180 BPM

    Args:
        tempo: target tempo in BPM; must lie within Live's supported
            20-999 BPM range.
    """
    try:
        # Ableton Live only accepts tempos in the 20-999 BPM range; fail fast
        # with a clear message instead of forwarding an impossible value.
        if not 20.0 <= tempo <= 999.0:
            return f"✗ Error: tempo fuera de rango (20-999 BPM): {tempo}"

        ableton = get_ableton_connection()
        response = ableton.send_command("set_tempo", {"tempo": tempo})

        if response.get("status") == "success":
            return f"♩ Tempo cambiado a {tempo} BPM"
        else:
            return f"✗ Error: {response.get('message')}"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def start_playback(ctx: Context) -> str:
    """Start playback."""
    try:
        reply = get_ableton_connection().send_command("start_playback")

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        try:
            # Best-effort: keep the M4L sampler in sync with the transport.
            send_m4l_sampler_command("start")
        except Exception:
            pass
        return "▶ Reproducción iniciada"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def stop_playback(ctx: Context) -> str:
    """Stop playback."""
    try:
        reply = get_ableton_connection().send_command("stop_playback")

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        try:
            # Best-effort: keep the M4L sampler in sync with the transport.
            send_m4l_sampler_command("stop")
        except Exception:
            pass
        return "⏹ Reproducción detenida"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Scenes
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def create_scene(ctx: Context, index: int = -1, name: str = "") -> str:
    """
    Create a new scene.

    Args:
        index: insertion position (-1 appends at the end)
        name: optional scene name applied after creation
    """
    try:
        ableton = get_ableton_connection()
        response = ableton.send_command("create_scene", {"index": index})

        if response.get("status") == "success":
            # Set the name if one was provided.
            if name:
                # Guard against a missing "result" payload instead of
                # raising KeyError on response["result"].
                scene_idx = response.get("result", {}).get("index", index if index >= 0 else 0)
                ableton.send_command("set_scene_name", {
                    "scene_index": scene_idx,
                    "name": name
                })
            return f"✓ Scene '{name}' creada" if name else "✓ Scene creada"
        else:
            return f"✗ Error: {response.get('message')}"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def set_scene_name(ctx: Context, scene_index: int, name: str) -> str:
    """Rename a scene."""
    try:
        reply = get_ableton_connection().send_command("set_scene_name", {
            "scene_index": scene_index,
            "name": name
        })

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        return f"✓ Scene {scene_index} renombrada a '{name}'"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def fire_scene(ctx: Context, scene_index: int) -> str:
    """Fire a scene (launches all of its clips)."""
    try:
        reply = get_ableton_connection().send_command("fire_scene", {"scene_index": scene_index})

        if reply.get("status") != "success":
            return f"✗ Error: {reply.get('message')}"
        return f"▶ Scene {scene_index} disparada"

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Generación Musical (AI)
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def generate_track(
    ctx: Context,
    genre: str,
    style: str = "",
    bpm: float = 0,
    key: str = "",
    structure: str = "standard"
) -> str:
    """
    Generate a complete AI-driven track from musical parameters.

    Orchestrates the whole pipeline: config generation, sending the blueprint
    to Ableton, audio/sampler fallbacks, arrangement markers, mix buses,
    master chain, and finally stores a generation manifest.

    Args:
        genre: Musical genre (techno, house, trance, tech-house, drum-and-bass)
        style: Sub-genre or specific style (e.g., "industrial", "deep", "90s", "minimal")
        bpm: Desired BPM (0 = auto-select based on genre)
        key: Key (e.g., "Am", "F#m", "C") - empty = auto-select
        structure: Track structure (standard, minimal, extended)

    Returns:
        A human-readable multi-line summary, or an error string starting
        with "✗ Error".

    Examples:
        - generate_track("techno", "industrial", 138, "F#m")
        - generate_track("house", "deep", 124, "Am")
        - generate_track("tech-house", "groovy", 126)
    """
    try:
        if SongGenerator is None:
            return "✗ Error: Módulo song_generator no disponible"

        generator = get_song_generator()

        # Start usage tracking for this generation (sample selector and
        # reference listener both keep cross-generation diversity memory).
        selector = get_sample_selector()
        if hasattr(selector, 'start_generation_tracking'):
            selector.start_generation_tracking()
        listener = get_reference_listener()
        if listener is not None and hasattr(listener, 'start_generation_tracking'):
            listener.start_generation_tracking()

        # Generate the track configuration/blueprint.
        config = generator.generate_config(genre, style, bpm, key, structure)

        # Log section variants (first five only, to keep logs readable).
        sections = config.get("sections", []) or []
        if sections:
            logger.info("SECTION_VARIANTS: %d sections generated", len(sections))
            for i, section in enumerate(sections[:5]):  # First 5
                kind = section.get('kind', 'unknown')
                drum_var = section.get('drum_variant', 'default')
                bass_var = section.get('bass_variant', 'default')
                mel_var = section.get('melodic_variant', 'default')
                logger.info(" Section %d (%s): drum=%s, bass=%s, melodic=%s",
                            i, kind, drum_var, bass_var, mel_var)
            if len(sections) > 5:
                logger.info(" ... and %d more sections", len(sections) - 5)

        # Log pattern bank usage if available
        if 'pattern_bank_hits' in config:
            logger.debug("PATTERN_BANK: %d patterns from bank",
                         sum(config['pattern_bank_hits'].values()))

        # Log gain staging summary if available
        _log_gain_staging_summary(config)

        reference_audio_plan = _build_reference_audio_plan(config)
        total_beats = int(config.get("total_beats", 16) or 16)
        # The runtime copy strips the audio plan: Ableton only needs the
        # MIDI/track blueprint; audio layers are applied separately below.
        runtime_config = dict(config)
        runtime_config.pop("reference_audio_plan", None)

        # Send the blueprint to Ableton.
        ableton = get_ableton_connection()
        response = ableton.send_command("generate_track", runtime_config)

        if response.get("status") == "success":
            runtime_result = response.get("result", {})
            # Prefer values reported back by Ableton, falling back to the
            # generated config and finally to the caller's arguments.
            runtime_bpm = runtime_result.get("bpm", config.get("bpm", bpm))
            runtime_key = runtime_result.get("key", config.get("key", key))
            resolved_genre = str(config.get("genre", genre)).strip()
            resolved_style = str(config.get("style", style)).strip()
            title_parts = [resolved_genre.title()]
            if resolved_style:
                title_parts.append(resolved_style.title())

            parts = ["✓ Track generado exitosamente!"]
            parts.append(f"Tema: {' '.join(title_parts)}")
            parts.append(f"BPM: {runtime_bpm}")

            resolved_key = runtime_key
            if resolved_key:
                parts.append(f"Key: {resolved_key}")

            if resolved_style:
                parts.append(f"Style: {resolved_style}")
            if config.get("arrangement_profile"):
                parts.append(f"Profile: {config['arrangement_profile']}")
            if config.get("reference_track"):
                parts.append(f"Referencia: {config['reference_track'].get('name')}")

            actual_tracks = runtime_result.get("tracks")
            actual_scenes = runtime_result.get("scenes")
            actual_returns = runtime_result.get("return_tracks")
            actual_cue_points = runtime_result.get("cue_points")
            actual_structure = runtime_result.get("structure", structure)
            playback_mode = runtime_result.get("playback_mode", "session")
            arrangement_result = ""
            marker_result = ""
            hybrid_result = ""
            bus_result = ""
            master_result = ""

            def refresh_runtime_counts() -> None:
                """Re-query session counts after each step that adds tracks/scenes."""
                nonlocal actual_tracks, actual_scenes, actual_returns, actual_cue_points
                session_response = ableton.send_command("get_session_info")
                if _is_error_response(session_response):
                    return
                session_info = session_response.get("result", {})
                actual_tracks = session_info.get("num_tracks", actual_tracks)
                actual_scenes = session_info.get("num_scenes", actual_scenes)
                actual_returns = session_info.get("num_return_tracks", actual_returns)
                actual_cue_points = session_info.get("num_cue_points", actual_cue_points)

            if reference_audio_plan:
                reference_info = reference_audio_plan.get("reference", {})
                parts.append(f"Referencia escuchada con: {reference_info.get('device', 'numpy')}")
                if reference_info.get("variant_seed") is not None:
                    parts.append(f"Variante: {reference_info.get('variant_seed')}")

            if runtime_result.get("requires_arrangement_commit"):
                arrangement_result = commit_session_blueprint_to_arrangement(ableton, config)
                playback_mode = "arrangement"
                refresh_runtime_counts()

            if reference_audio_plan:
                # Reference-driven path: lay audio samples into the arrangement.
                try:
                    fallback_result = setup_audio_sample_fallback(
                        genre=resolved_genre,
                        style=resolved_style,
                        key=resolved_key or "",
                        bpm=float(runtime_bpm) if runtime_bpm else 0,
                        total_beats=total_beats,
                        config=config,
                    )
                    hybrid_result = "\n".join([item for item in [hybrid_result, fallback_result] if item])
                    playback_mode = "arrangement"
                    refresh_runtime_counts()
                except Exception as audio_fallback_error:
                    fallback_error = f"Audio reference fallback no disponible: {audio_fallback_error}"
                    hybrid_result = "\n".join([item for item in [hybrid_result, fallback_error] if item])
            else:
                # No reference_audio_plan: try the hybrid M4L sampler first,
                # then fall back to the standard audio-sample path.
                try:
                    hybrid_result = setup_hybrid_m4l_sampler(
                        genre=resolved_genre,
                        style=resolved_style,
                        key=resolved_key or "",
                        bpm=float(runtime_bpm) if runtime_bpm else 0,
                    )
                    if hybrid_result:
                        refresh_runtime_counts()
                except Exception as hybrid_error:
                    hybrid_result = f"Modo híbrido no disponible: {hybrid_error}"
                    try:
                        fallback_result = setup_audio_sample_fallback(
                            genre=resolved_genre,
                            style=resolved_style,
                            key=resolved_key or "",
                            bpm=float(runtime_bpm) if runtime_bpm else 0,
                            total_beats=total_beats,
                            config=config,
                        )
                        hybrid_result = "\n".join([item for item in [hybrid_result, fallback_result] if item])
                        playback_mode = "arrangement"
                        refresh_runtime_counts()
                    except Exception as audio_fallback_error:
                        hybrid_result = "\n".join([
                            item for item in [
                                hybrid_result,
                                f"Audio fallback no disponible: {audio_fallback_error}",
                            ] if item
                        ])

            if playback_mode == "arrangement":
                try:
                    marker_result = apply_arrangement_markers(ableton, config)
                    refresh_runtime_counts()
                except Exception as marker_error:
                    marker_result = f"Markers de Arrangement no disponibles: {marker_error}"

            # Derived FX layers from the local library (best-effort).
            try:
                resampler = get_audio_resampler()
                if resampler is not None:
                    sections = config.get("sections", [])
                    derived_layers = resampler.build_transition_layers(
                        {"matches": {}},
                        sections,
                        float(runtime_bpm) if runtime_bpm else 138.0,
                    )
                    if derived_layers:
                        logger.info("Creating %d derived FX layers from local library", len(derived_layers))
                        for layer in derived_layers:
                            try:
                                create_response = ableton.send_command("create_audio_track", {"index": -1})
                                if _is_error_response(create_response):
                                    continue
                                track_index = create_response.get("result", {}).get("index")
                                if track_index is None:
                                    continue
                                ableton.send_command("set_track_name", {"track_index": track_index, "name": layer["name"]})
                                ableton.send_command("set_track_color", {"track_index": track_index, "color": layer.get("color", 20)})
                                ableton.send_command("set_track_volume", {"track_index": track_index, "volume": _linear_to_live_slider(layer.get("volume", 0.5))})
                                ableton.send_command("create_arrangement_audio_pattern", {
                                    "track_index": track_index,
                                    "file_path": layer["file_path"],
                                    "positions": layer["positions"],
                                    "name": layer["name"],
                                })
                                hybrid_result = f"{hybrid_result}\n{layer['name']}: {Path(layer['file_path']).name}" if hybrid_result else f"{layer['name']}: {Path(layer['file_path']).name}"
                            except Exception as layer_error:
                                logger.warning("Failed to create derived layer %s: %s", layer.get("name"), layer_error)
                        refresh_runtime_counts()
            except Exception as resample_error:
                logger.warning("Derived FX layers no disponibles: %s", resample_error)

            try:
                bus_result = apply_mix_bus_architecture(ableton, config)
                if bus_result:
                    refresh_runtime_counts()
            except Exception as bus_error:
                bus_result = f"Mix buses no disponibles: {bus_error}"

            try:
                master_result = apply_master_chain(ableton, config)
            except Exception as master_error:
                master_result = f"Master chain no disponible: {master_error}"

            if actual_tracks is not None:
                parts.append(f"Tracks reales: {actual_tracks}")
            if actual_scenes is not None:
                parts.append(f"Scenes reales: {actual_scenes}")
            if actual_returns is not None:
                parts.append(f"Returns reales: {actual_returns}")
            if actual_cue_points is not None:
                parts.append(f"Locators reales: {actual_cue_points}")
            if actual_structure:
                parts.append(f"Estructura: {actual_structure}")
            parts.append(f"Playback: {playback_mode}")
            if arrangement_result:
                parts.append(arrangement_result)
            if marker_result:
                parts.append(marker_result)
            if bus_result:
                parts.append(bus_result)
            if master_result:
                parts.append(master_result)
            if hybrid_result:
                parts.append(hybrid_result)

            # Build the manifest for this generation from the real config plus
            # the materialized plan.
            manifest = {
                "timestamp": time.time(),
                "genre": resolved_genre,
                "style": resolved_style,
                "bpm": runtime_bpm,
                "key": resolved_key,
                "structure_name": actual_structure,
                "profile": config.get("arrangement_profile"),
                "playback_mode": playback_mode,
                "reference_path": reference_audio_plan.get("reference", {}).get("path") if reference_audio_plan else None,
                "reference_name": reference_audio_plan.get("reference", {}).get("file_name") if reference_audio_plan else None,
                "reference_device": reference_audio_plan.get("reference", {}).get("device") if reference_audio_plan else None,
                "actual_runtime": {
                    "tracks": actual_tracks,
                    "scenes": actual_scenes,
                    "returns": actual_returns,
                    "cue_points": actual_cue_points,
                },

                # Config structure
                "structure": config.get("structure", actual_structure),
                "sections": [{"kind": s.get("kind"), "name": s.get("name"), "start": s.get("start"), "end": s.get("end")}
                             for s in config.get("sections", [])],

                # Section variant summary
                "section_variant_summary": {
                    "total_sections": len(config.get("sections", []) or []),
                    "variants_used": {
                        "drum": list(set(s.get("drum_variant", "straight") for s in config.get("sections", []) or [])),
                        "kick": list(set(s.get("kick_variant", (s.get("drum_role_variants") or {}).get("kick", "straight")) for s in config.get("sections", []) or [])),
                        "clap": list(set(s.get("clap_variant", (s.get("drum_role_variants") or {}).get("clap", "straight")) for s in config.get("sections", []) or [])),
                        "hat_closed": list(set(s.get("hat_closed_variant", (s.get("drum_role_variants") or {}).get("hat_closed", "straight")) for s in config.get("sections", []) or [])),
                        "bass": list(set(s.get("bass_variant", "anchor") for s in config.get("sections", []) or [])),
                        "bass_bank": list(set(s.get("bass_bank_variant", s.get("bass_variant", "anchor")) for s in config.get("sections", []) or [])),
                        "melodic": list(set(s.get("melodic_variant", "motif") for s in config.get("sections", []) or [])),
                        "melodic_bank": list(set(s.get("melodic_bank_variant", s.get("melodic_variant", "motif")) for s in config.get("sections", []) or [])),
                        "transition_fill": list(set(s.get("transition_fill", "none") for s in config.get("sections", []) or [])),
                    }
                },

                # Tracks blueprint
                "tracks": [],
                "buses": [],
                "returns": [],
                "muted_replaced_tracks": sorted(_expected_audio_replacement_tracks()),

                # Audio layers
                "audio_layers": [],
                "resample_layers": [],
            }

            for track_spec in config.get("tracks", []) or []:
                if not isinstance(track_spec, dict):
                    continue
                manifest["tracks"].append({
                    "name": track_spec.get("name"),
                    "role": track_spec.get("role"),
                    "type": track_spec.get("type"),
                    "bus": track_spec.get("bus"),
                    "device": track_spec.get("device"),
                    "color": track_spec.get("color"),
                })

            for bus_spec in config.get("buses", []) or []:
                if not isinstance(bus_spec, dict):
                    continue
                manifest["buses"].append({
                    "name": bus_spec.get("name"),
                    "key": bus_spec.get("key"),
                    "type": bus_spec.get("type"),
                    "color": bus_spec.get("color"),
                })

            for return_spec in config.get("returns", []) or []:
                if not isinstance(return_spec, dict):
                    continue
                manifest["returns"].append({
                    "name": return_spec.get("name"),
                    "send_key": return_spec.get("send_key"),
                    "color": return_spec.get("color"),
                })

            # Extract reference_audio_plan details if present.
            if reference_audio_plan:
                layers = reference_audio_plan.get('layers', [])
                section_samples = reference_audio_plan.get('section_samples', {})
                sections = reference_audio_plan.get('sections', [])

                # Build section index to name mapping
                section_names = {}
                for idx, section in enumerate(sections):
                    if isinstance(section, dict):
                        # NOTE(review): section_key computed here is never used
                        # in this loop — looks like leftover code; verify.
                        section_key = f"{section.get('kind', '')}_{section.get('name', '')}"
                        section_names[idx] = {
                            "kind": section.get("kind"),
                            "name": section.get("name"),
                            "start": section.get("start"),
                            "end": section.get("end"),
                        }

                for layer in layers:
                    if isinstance(layer, dict):
                        # Key info: detect whether this layer uses different
                        # samples per section.
                        layer_section_sources = {}  # section_key -> source_path

                        # If the plan carries per-section sample info
                        if section_samples:
                            # Map layer name to role
                            layer_name = layer.get('name', '')
                            layer_role = None

                            # Map layer names to variation roles
                            role_mapping = {
                                'AUDIO PERC MAIN': 'perc',
                                'AUDIO PERC ALT': 'perc_alt',
                                'AUDIO TOP LOOP': 'top_loop',
                                'AUDIO VOCAL SHOT': 'vocal_shot',
                                'AUDIO SYNTH PEAK': 'synth_peak',
                                'AUDIO ATMOS': 'atmos',
                            }

                            layer_role = role_mapping.get(layer_name)

                            # If we found a matching role, extract section samples
                            if layer_role:
                                for section_idx, section_samples_dict in section_samples.items():
                                    if isinstance(section_samples_dict, dict) and section_idx in section_names:
                                        section_info = section_names[section_idx]
                                        section_key = f"{section_info['kind']}_{section_info['name']}"

                                        # Get the sample for this role in this section
                                        sample = section_samples_dict.get(layer_role)
                                        if sample and isinstance(sample, dict):
                                            sample_path = sample.get('path') or sample.get('file_path')
                                            if sample_path:
                                                layer_section_sources[section_key] = {
                                                    "source_path": sample_path,
                                                    "source_file": Path(sample_path).name,
                                                    "section_kind": section_info['kind'],
                                                    "section_name": section_info['name'],
                                                }

                        layer_info = {
                            "track_name": layer.get('name'),
                            "name": layer.get('name'),
                            "role": layer.get('role'),
                            "file_path": layer.get('file_path'),
                            "source_path": layer.get('file_path'),
                            "source_file": Path(layer.get('file_path', '')).name if layer.get('file_path') else None,
                            "section_sources": layer_section_sources,  # real per-section sources
                        }

                        # Mark layers that genuinely vary across sections.
                        if len(layer_section_sources) > 1:
                            layer_info["has_real_variants"] = True
                            layer_info["variant_count"] = len(layer_section_sources)

                        if 'RESAMPLE' in str(layer.get('name', '')):
                            manifest["resample_layers"].append(layer_info)
                        else:
                            manifest["audio_layers"].append(layer_info)

                # Variant summary
                variant_layers = [layer for layer in manifest["audio_layers"] if layer.get("has_real_variants")]
                manifest["variant_summary"] = {
                    "total_layers_with_variants": len(variant_layers),
                    "variant_roles": [layer["name"] for layer in variant_layers],
                    "total_variants": sum(layer.get("variant_count", 0) for layer in variant_layers)
                }

                if manifest["variant_summary"]["total_layers_with_variants"] >= 2:
                    logger.info("Generation has %d layers with real section variants: %s",
                                manifest["variant_summary"]["total_layers_with_variants"],
                                ", ".join(manifest["variant_summary"]["variant_roles"]))

            # Add transition event summary
            manifest['transition_event_summary'] = _build_transition_event_summary(config)

            # Add mix automation summary
            if 'mix_automation_summary' in config:
                manifest['mix_automation_summary'] = config['mix_automation_summary']

            _store_generation_manifest(manifest)
            logger.info("Generation manifest stored with %d tracks, %d audio layers, %d resample layers, %d transition events",
                        len(manifest["tracks"]), len(manifest["audio_layers"]), len(manifest["resample_layers"]),
                        manifest.get('transition_event_summary', {}).get('total_events', 0))

            # Finish tracking and update cross-generation memory.
            if hasattr(selector, 'end_generation_tracking'):
                selector.end_generation_tracking()
            if listener is not None and hasattr(listener, 'end_generation_tracking'):
                listener.end_generation_tracking()

            return "\n".join(parts)
        else:
            return f"✗ Error: {response.get('message')}"

    except Exception as e:
        return f"✗ Error generando track: {str(e)}"
|
|
|
|
|
|
def _apply_auto_automation() -> str:
    """Write default volume/reverb automation onto freshly generated tracks.

    Applies three best-effort curves based on track names:
    - 4-bar volume fade-in on intro tracks (kick/bass/hat)
    - build-up volume curve (bars 32-40) on musical tracks (synth/pad/chords/lead)
    - reverb swell over the break (bars 128-160) on atmos/pad/vocal tracks

    Returns a human-readable summary, or "" when nothing was applied.
    Failures are reported in the summary string instead of raised.
    """
    try:
        conn = get_ableton_connection()
        automation_applied = []

        tracks_response = conn.send_command("get_all_tracks")
        # BUGFIX: every other handler in this server replies status
        # "success"; the original check for "ok" alone silently skipped
        # all automation. Accept both spellings.
        if isinstance(tracks_response, dict) and tracks_response.get("status") in ("ok", "success"):
            for track in tracks_response.get("tracks", []):
                track_idx = track.get("index")
                track_name = track.get("name", "").lower()

                # Fade-in on intro tracks (kick, bass, hat): silence -> unity over 4 bars.
                if any(x in track_name for x in ["kick", "bass", "hat"]):
                    try:
                        conn.send_command("write_track_automation", {
                            "track_index": track_idx,
                            "parameter": "volume",
                            "points": [
                                {"time": 0, "value": 0.0},
                                {"time": 4, "value": 0.85}
                            ]
                        })
                        automation_applied.append(f"{track_name}: fade-in 4 bars")
                    except Exception:
                        # Best-effort: a missing remote handler must not abort generation.
                        pass

                # Build curve on musical tracks: low -> high into the drop.
                if any(x in track_name for x in ["synth", "pad", "chords", "lead"]):
                    try:
                        conn.send_command("write_track_automation", {
                            "track_index": track_idx,
                            "parameter": "volume",
                            "points": [
                                {"time": 32, "value": 0.5},
                                {"time": 40, "value": 0.9}
                            ]
                        })
                        automation_applied.append(f"{track_name}: build curve")
                    except Exception:
                        pass

                # Reverb swell over the break on atmospheric tracks.
                if any(x in track_name for x in ["atmos", "pad", "vocal"]):
                    try:
                        conn.send_command("write_reverb_automation", {
                            "track_index": track_idx,
                            "parameter": "reverb_wet",
                            "points": [
                                {"time": 128, "value": 0.0},
                                {"time": 136, "value": 0.4},
                                {"time": 152, "value": 0.4},
                                {"time": 160, "value": 0.0}
                            ]
                        })
                        automation_applied.append(f"{track_name}: reverb break")
                    except Exception:
                        pass

        if automation_applied:
            summary = f"🎚️ Automation aplicada ({len(automation_applied)} tracks):\n" + "\n".join([f"  - {a}" for a in automation_applied[:5]])
            if len(automation_applied) > 5:
                summary += f"\n  ... y {len(automation_applied) - 5} más"
            return summary
        return ""

    except Exception as e:
        return f"⚠️ Automation error: {str(e)}"


@mcp.tool()
def generate_song(
    ctx: Context,
    genre: str,
    style: str = "",
    bpm: float = 0,
    key: str = "",
    structure: str = "standard",
    auto_play: bool = True,
    apply_automation: bool = True
) -> str:
    """
    Generate a complete song and organize the scenes per the chosen preset.

    Args:
        genre: Musical genre (tech-house, techno, house, etc.)
        style: Specific style
        bpm: BPM (0 = auto)
        key: Key
        structure: Structure preset (standard, minimal, extended)
        auto_play: Start playback automatically
        apply_automation: Apply automatic fades/volume/reverb curves

    Returns:
        The concatenated summaries of every stage, or the generation error.
    """
    track_result = generate_track(ctx, genre, style, bpm, key, structure)
    if "Error" in track_result:
        return track_result

    # generate_track reports the structure it actually used; prefer it over
    # the caller's request when arranging.
    resolved_structure = structure
    for line in track_result.splitlines():
        if line.startswith("Estructura:"):
            resolved_structure = line.split(":", 1)[1].strip() or structure
            break

    arrangement_result = arrange_song_structure(ctx, resolved_structure, exact=True)

    # Auto fades / volume / reverb automation (best-effort).
    automation_result = ""
    if apply_automation:
        automation_result = _apply_auto_automation()

    playback_mode = "arrangement" if "Playback: arrangement" in track_result else "session"
    ableton = get_ableton_connection()
    try:
        # Rewind the transport so playback starts from the top.
        ableton.send_command("jump_to", {"time": 0})
    except Exception:
        pass

    results = [track_result, arrangement_result]
    if automation_result:
        results.append(automation_result)

    if auto_play:
        playback_result = start_playback(ctx)
        if playback_mode != "arrangement":
            # Session mode: the first scene must be fired for clips to sound.
            results.append(fire_scene(ctx, 0))
        results.append(playback_result)

    return "\n\n".join(results)
|
|
|
|
|
|
|
|
@mcp.tool()
def generate_with_human_feel(ctx: Context, genre: str, bpm: float = 0, key: str = "",
                             humanize: bool = True, groove_style: str = "shuffle",
                             structure: str = "standard") -> str:
    """
    T040-T050: Generate a track config with human feel applied.

    Args:
        genre: Musical genre
        bpm: BPM (0 = auto)
        key: Key/tonality
        humanize: Apply timing/velocity humanization
        groove_style: Groove style (straight, shuffle, triplet, latin)
        structure: Song structure preset name

    Returns:
        JSON string with the generated config, sample palette and feel flags.
    """
    try:
        logger.info(f"Generando {genre} con human feel (groove={groove_style})")

        song_gen = get_song_generator()

        # Anchor folders are chosen first so the config can be built around them.
        anchors = _select_anchor_folders(genre, key, bpm)

        song_config = song_gen.generate_config(genre, style="", bpm=bpm, key=key,
                                               structure=structure, palette=anchors)

        # NOTE(review): the engine is instantiated with the config's variant
        # seed but its output is not consumed in this function — presumably
        # applied downstream. TODO confirm.
        HumanFeelEngine(seed=song_config.get('variant_seed', 42))

        payload = {
            "status": "success",
            "action": "generate_with_human_feel",
            "config": song_config,
            "palette": anchors,
            "humanize": humanize,
            "groove_style": groove_style,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
# ============================================================================
|
|
# FASE 3: HUMAN FEEL & DYNAMICS TOOLS (T040-T050)
|
|
# ============================================================================
|
|
|
|
# FASE 3: HUMAN FEEL & DYNAMICS TOOLS (T040-T050)
|
|
|
|
@mcp.tool()
def apply_clip_fades(ctx: Context, track_index: int, clip_index: int,
                     fade_in_bars: float = 0.0, fade_out_bars: float = 0.0) -> str:
    """
    T041: Apply volume fade in/out to a clip via a clip volume envelope.

    Args:
        track_index: Track index
        clip_index: Clip index
        fade_in_bars: Fade-in length (beats/bars)
        fade_out_bars: Fade-out length (beats/bars)

    Example uses: intro fade-in 4-8 bars, symmetric outro fade-out,
    break fade-down/up.

    Fix: fades are now clamped to the clip length and kept non-overlapping,
    so fade_in_bars + fade_out_bars > clip_length no longer produces a
    non-monotonic (invalid) envelope.
    """
    try:
        conn = get_ableton_connection()

        # 1. Fetch clip info so we know its real length.
        clip_info = conn.send_command("get_clip_info", {
            "track_index": track_index,
            "clip_index": clip_index
        })

        if not isinstance(clip_info, dict) or clip_info.get("status") != "ok":
            return json.dumps({"error": "Could not get clip info"}, indent=2)

        clip_length = clip_info.get("length", 4.0)

        # Clamp fades into [0, clip_length]; negative inputs become 0.
        fade_in = max(0.0, min(fade_in_bars, clip_length))
        fade_out = max(0.0, min(fade_out_bars, clip_length))

        # 2. Build the volume envelope points.
        envelope_points = []

        if fade_in > 0:
            # Fade in: 0.0 -> 1.0
            envelope_points.extend([
                {"time": 0.0, "value": 0.0},
                {"time": fade_in, "value": 1.0}
            ])
        else:
            envelope_points.append({"time": 0.0, "value": 1.0})

        if fade_out > 0:
            # Fade out: 1.0 -> 0.0 at the end of the clip. The fade-out may
            # start no earlier than the fade-in ends so points stay ordered.
            fade_start = max(fade_in, clip_length - fade_out)
            envelope_points.extend([
                {"time": fade_start, "value": 1.0},
                {"time": clip_length, "value": 0.0}
            ])

        # Defensive: keep points chronologically sorted for the envelope writer.
        envelope_points.sort(key=lambda p: p["time"])

        # 3. Send the automation command.
        result = conn.send_command("write_clip_envelope", {
            "track_index": track_index,
            "clip_index": clip_index,
            "parameter": "volume",
            "points": envelope_points
        })

        return json.dumps({
            "status": "success",
            "action": "apply_clip_fades",
            "track_index": track_index,
            "clip_index": clip_index,
            "fade_in_bars": fade_in_bars,
            "fade_out_bars": fade_out_bars,
            "clip_length": clip_length,
            "envelope_points": len(envelope_points),
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def write_volume_automation(ctx: Context, track_index: int,
                            curve_type: str = "linear",
                            start_value: float = 0.85,
                            end_value: float = 0.85,
                            duration_bars: float = 8.0) -> str:
    """
    T042: Write volume automation with shaped curves.

    Args:
        track_index: Track index
        curve_type: Curve shape ('linear', 'exponential', 's_curve', 'punch')
        start_value: Initial volume (0.0-1.0, where 0.85 = 0dB)
        end_value: Final volume (0.0-1.0)
        duration_bars: Automation length in bars

    Examples:
        - Build: exponential 0.5 -> 0.85 over 8 bars
        - Drop punch: punch curve 0.85 -> 1.0 -> 0.85

    Fix: the per-point time variable was named `time`, shadowing the
    module-level `time` module; renamed to `point_time`.
    """
    try:
        conn = get_ableton_connection()

        # Generate sample points for the requested curve shape.
        points = []
        num_points = 20  # curve resolution

        for i in range(num_points + 1):
            t = i / num_points
            point_time = t * duration_bars

            if curve_type == "linear":
                value = start_value + (end_value - start_value) * t
            elif curve_type == "exponential":
                # Exponential curve for builds: ease-in when rising,
                # ease-out (sqrt) when falling.
                if start_value < end_value:
                    value = start_value + (end_value - start_value) * (t ** 2)
                else:
                    value = start_value - (start_value - end_value) * (t ** 0.5)
            elif curve_type == "s_curve":
                # Smooth S curve (smoothstep polynomial).
                value = start_value + (end_value - start_value) * (3*t**2 - 2*t**3)
            elif curve_type == "punch":
                # Punch: quick rise to full, then settle to end_value.
                if t < 0.3:
                    value = start_value + (1.0 - start_value) * (t / 0.3)
                elif t < 0.7:
                    peak = 1.0
                    value = peak - (peak - end_value) * ((t - 0.3) / 0.4)
                else:
                    value = end_value
            else:
                # Unknown curve type falls back to linear.
                value = start_value + (end_value - start_value) * t

            points.append({"time": point_time, "value": max(0.0, min(1.0, value))})

        # Send the curve to Ableton.
        result = conn.send_command("write_track_automation", {
            "track_index": track_index,
            "parameter": "volume",
            "points": points
        })

        return json.dumps({
            "status": "success",
            "action": "write_volume_automation",
            "track_index": track_index,
            "curve_type": curve_type,
            "start_value": start_value,
            "end_value": end_value,
            "duration_bars": duration_bars,
            "points_count": len(points),
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_sidechain_pump(ctx: Context, target_track: int,
                         intensity: str = "subtle",
                         style: str = "jackin") -> str:
    """
    T045: Apply sidechain pumping to a track.

    Args:
        target_track: Index of the track to be pumped
        intensity: 'subtle', 'moderate' or 'heavy'
        style: 'jackin' (every beat), 'breathing' (every 2 beats), 'subtle' (minimal)

    Sets up a sidechain compressor on the track, keyed from the kick track.
    """
    try:
        ableton = get_ableton_connection()

        # Compressor presets per intensity level.
        presets = {
            "subtle": {"threshold": -20.0, "ratio": 2.0, "attack": 5.0, "release": 100.0},
            "moderate": {"threshold": -15.0, "ratio": 4.0, "attack": 3.0, "release": 80.0},
            "heavy": {"threshold": -10.0, "ratio": 8.0, "attack": 1.0, "release": 60.0}
        }
        chosen = presets.get(intensity, presets["subtle"])

        # Track 0 is assumed to hold the kick (sidechain source).
        response = ableton.send_command("setup_sidechain", {
            "target_track": target_track,
            "source_track": 0,
            "compressor_params": chosen,
            "style": style
        })

        return json.dumps({
            "status": "success",
            "action": "apply_sidechain_pump",
            "target_track": target_track,
            "intensity": intensity,
            "style": style,
            "compressor_config": chosen,
            "result": response
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def inject_pattern_fills(ctx: Context, track_index: int,
                         fill_density: str = "medium",
                         section: str = "drop") -> str:
    """
    T048: Inject pattern fills (snare rolls, flams, tom fills, hi-hat bursts).

    Args:
        track_index: Drum track index
        fill_density: 'sparse' (1 every 8 bars), 'medium', 'heavy' (every 2 bars)
        section: Section to target (intro, build, drop, break, outro)

    Adds rhythmic variation by placing fills at strategic points.
    """
    try:
        ableton = get_ableton_connection()

        # Interval/length presets per density.
        density_presets = {
            "sparse": {"interval_bars": 8, "fill_length": 1},
            "medium": {"interval_bars": 4, "fill_length": 2},
            "heavy": {"interval_bars": 2, "fill_length": 4}
        }
        preset = density_presets.get(fill_density, density_presets["medium"])

        # 'auto' lets the remote end pick snare_roll/flam/tom_fill/hihat_burst.
        response = ableton.send_command("inject_fills", {
            "track_index": track_index,
            "fill_type": "auto",
            "interval_bars": preset["interval_bars"],
            "fill_length_bars": preset["fill_length"],
            "section": section
        })

        return json.dumps({
            "status": "success",
            "action": "inject_pattern_fills",
            "track_index": track_index,
            "fill_density": fill_density,
            "section": section,
            "config": preset,
            "result": response
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def humanize_set(ctx: Context, intensity: float = 0.5) -> str:
    """
    T050: Umbrella tool to humanize the whole set.

    Args:
        intensity: Humanization level (0.3 = subtle, 0.6 = medium, 1.0 = extreme)

    Applies timing variation, velocity humanize and groove settings to every
    MIDI clip in the set.

    Fixes: 'tracks_affected' now counts distinct MIDI tracks (it previously
    duplicated the clip count), and the redundant local import of
    HumanFeelEngine (already imported at module level) was removed.
    """
    try:
        conn = get_ableton_connection()

        # Fetch all tracks from the live set.
        tracks_response = conn.send_command("get_all_tracks")
        if not isinstance(tracks_response, dict):
            return json.dumps({"error": "Could not get tracks"}, indent=2)

        tracks = tracks_response.get("tracks", [])
        results = []
        affected_tracks = set()

        # NOTE(review): the engine is constructed but its output is not
        # applied below — presumably the settings are consumed downstream.
        engine = HumanFeelEngine(seed=int(time.time()))

        for track in tracks:
            track_idx = track.get("index")
            is_midi = track.get("is_midi", False)

            # Only MIDI clips can be humanized.
            if not is_midi:
                continue

            clips = track.get("clips", [])
            for clip in clips:
                clip_idx = clip.get("index", 0)

                # Settings scale with intensity.
                if intensity >= 0.6:
                    # Timing + velocity + groove.
                    settings = {
                        "timing_variation_ms": intensity * 10,
                        "velocity_variance": intensity * 0.1,
                        "groove_style": "shuffle" if intensity > 0.7 else "straight"
                    }
                else:
                    # Velocity only.
                    settings = {
                        "velocity_variance": intensity * 0.05
                    }

                affected_tracks.add(track_idx)
                results.append({
                    "track": track_idx,
                    "clip": clip_idx,
                    "settings": settings
                })

        return json.dumps({
            "status": "success",
            "action": "humanize_set",
            "intensity": intensity,
            "tracks_affected": len(affected_tracks),
            "clips_processed": len(results),
            "details": results
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
|
|
|
|
# ============================================================================
|
|
# ============================================================================
|
|
# FASE 4: KEY COMPATIBILITY & TONAL TOOLS (T051-T062)
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def analyze_key_compatibility(ctx: Context, key1: str, key2: str) -> str:
    """
    T052-T053: Analyze harmonic compatibility between two keys.

    Args:
        key1: First key (e.g. "F#m", "C", "Am")
        key2: Second key

    Returns:
        JSON with compatibility score, semitone distance, relationship,
        and recommended related keys.
    """
    try:
        matrix = get_key_matrix()
        report = matrix.get_compatibility_report(key1, key2)

        # Modulation suggestions in all four standard directions.
        modulations = {
            direction: matrix.suggest_key_change(key1, direction)
            for direction in ("fifth_up", "fifth_down", "relative", "parallel")
        }

        # Top related keys above the 0.70 threshold.
        related = [
            {"key": k, "score": round(s, 2)}
            for k, s in matrix.get_related_keys(key1, min_score=0.70)[:5]
        ]

        payload = {
            "status": "success",
            "action": "analyze_key_compatibility",
            "key1": key1,
            "key2": key2,
            "compatibility_score": round(report['compatibility_score'], 2),
            "relationship": report.get('relationship', 'unknown'),
            "compatible": report['compatible'],
            "semitone_distance": report.get('semitone_distance', 0),
            "suggested_modulations": modulations,
            "related_keys": related
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def suggest_key_change(ctx: Context, current_key: str,
                       direction: str = "fifth_up") -> str:
    """
    T054: Suggest a harmonic key change.

    Args:
        current_key: Current key (e.g. "Am", "F#m")
        direction: Change type:
            - 'fifth_up': fifth up (more energy)
            - 'fifth_down': fifth down (softer)
            - 'relative': relative major/minor
            - 'parallel': parallel major/minor

    Returns:
        Suggested key plus an explanation and all four options.
    """
    try:
        matrix = get_key_matrix()
        suggested = matrix.suggest_key_change(current_key, direction)

        # User-facing explanations (Spanish, matching the API's output language).
        explanations = {
            "fifth_up": "Subir una quinta añade tensión y energía (círculo de quintas)",
            "fifth_down": "Bajar una quinta suaviza la progresión (círculo de quintas inverso)",
            "relative": "El relativo comparte las mismas notas diatónicas (mismo key signature)",
            "parallel": "El paralelo cambia el modo pero mantiene la tónica"
        }

        all_options = {
            option: matrix.suggest_key_change(current_key, option)
            for option in ("fifth_up", "fifth_down", "relative", "parallel")
        }

        return json.dumps({
            "status": "success",
            "action": "suggest_key_change",
            "current_key": current_key,
            "direction": direction,
            "suggested_key": suggested,
            "explanation": explanations.get(direction, "Cambio armónico"),
            "all_options": all_options
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def validate_sample_key(ctx: Context, sample_key: str,
                        project_key: str,
                        tolerance: float = 0.70) -> str:
    """
    T055: Validate whether a sample is tonally compatible with the project.

    Args:
        sample_key: Key of the sample
        project_key: Key of the project
        tolerance: Minimum compatibility score (default 0.70)

    Returns:
        JSON with validation result and, when incompatible, an alternative
        key recommendation.
    """
    try:
        matrix = get_key_matrix()
        score = matrix.get_compatibility(sample_key, project_key)
        passes = score >= tolerance

        recommendation = None
        if not passes:
            # Offer the closest strongly-related key as an alternative.
            alternatives = matrix.get_related_keys(project_key, min_score=0.85)
            if alternatives:
                best_key, best_score = alternatives[0]
                recommendation = f"Considerar usar key {best_key} (score: {best_score:.2f})"

        return json.dumps({
            "status": "success",
            "action": "validate_sample_key",
            "sample_key": sample_key,
            "project_key": project_key,
            "compatibility_score": round(score, 2),
            "tolerance": tolerance,
            "compatible": passes,
            "recommendation": recommendation,
            "reject_sample": score < 0.40
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def analyze_spectral_fit(ctx: Context, spectral_centroid: float,
                         role: str) -> str:
    """
    T057: Analyze how well a sample's spectral brightness fits its role.

    Args:
        spectral_centroid: Spectral centroid in Hz
        role: Sample role (sub_bass, bass, kick, pad, lead, etc.)

    Returns:
        JSON with fit score and spectral color tag.
    """
    try:
        tonal = get_tonal_analyzer()

        score = tonal.analyze_spectral_fit(spectral_centroid, role)
        color = tonal.tag_spectral_color(spectral_centroid)

        # Reference table of optimal centroid ranges per role.
        optimal_ranges = {
            'sub_bass': '0-100 Hz',
            'bass': '100-500 Hz',
            'kick': '200-1000 Hz',
            'pad': '500-3000 Hz',
            'chords': '800-4000 Hz',
            'lead': '1000-6000 Hz',
            'pluck': '1500-5000 Hz',
            'atmos': '300-8000 Hz',
            'fx': '500-10000 Hz'
        }

        if score > 0.8:
            advice = "Ajuste espectral óptimo"
        else:
            advice = "Considerar EQ o seleccionar otro sample"

        return json.dumps({
            "status": "success",
            "action": "analyze_spectral_fit",
            "spectral_centroid_hz": round(spectral_centroid, 1),
            "role": role,
            "fit_score": round(score, 2),
            "spectral_color": color,
            "optimal_range": optimal_ranges.get(role, "Variable"),
            "recommendation": advice
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
# ============================================================================
|
|
# FASE 6: MASTERING & QA TOOLS (T078-T090)
|
|
# ============================================================================
|
|
|
|
# FASE 6: MASTERING & QA TOOLS (T078-T090)
|
|
|
|
@mcp.tool()
def calibrate_gain_staging(ctx: Context, target_lufs: Optional[float] = None) -> str:
    """
    T079: Calibrate set gain staging by estimating and adjusting levels.

    Args:
        target_lufs: Target master LUFS (-8 club, -14 streaming). Currently
            only used to label the target profile in the report; the per-bus
            targets below are fixed.

    Per-bus LUFS targets (matched by substring on the track name):
        drums -8, bass -10, music -12, vocals -14, fx -16.

    Fix: the matched bus is remembered in the loop instead of being
    re-derived with a second scan for the report; the default-None parameter
    is annotated Optional[float].
    """
    try:
        conn = get_ableton_connection()

        # Target LUFS per bus, matched by substring against the track name.
        bus_targets = {
            "drums": -8.0,
            "bass": -10.0,
            "music": -12.0,
            "vocals": -14.0,
            "fx": -16.0
        }

        # Fetch all tracks.
        tracks_response = conn.send_command("get_all_tracks")
        if not isinstance(tracks_response, dict):
            return json.dumps({"error": "Could not get tracks"}, indent=2)

        tracks = tracks_response.get("tracks", [])
        adjustments = []

        for track in tracks:
            track_name = track.get("name", "").lower()
            track_idx = track.get("index")

            # Identify the bus by name; first match wins.
            matched_bus = None
            target_lufs_bus = None
            for bus, target in bus_targets.items():
                if bus in track_name:
                    matched_bus = bus
                    target_lufs_bus = target
                    break

            if target_lufs_bus is None:
                continue

            # Real LUFS measurement would require audio analysis; the fader
            # position is used as a proxy: 0.85 volume ~= -12 LUFS, and each
            # 0.1 of volume ~= 3 dB ~= 3 LUFS.
            current_volume = track.get("volume", 0.85)
            current_lufs_est = -12.0 + (0.85 - current_volume) * 30
            lufs_diff = target_lufs_bus - current_lufs_est

            # Convert the LUFS delta back into a fader move (inverse of the
            # proxy mapping above), clamped to a sane range.
            volume_adjustment = lufs_diff / 30.0
            new_volume = max(0.1, min(1.0, current_volume + volume_adjustment))

            # Apply the adjustment.
            conn.send_command("set_track_volume", {
                "track_index": track_idx,
                "volume": new_volume
            })

            adjustments.append({
                "track": track_idx,
                "name": track_name,
                "bus": matched_bus,
                "old_volume": round(current_volume, 3),
                "new_volume": round(new_volume, 3),
                "target_lufs": target_lufs_bus,
                "estimated_lufs": round(current_lufs_est, 1),
                "adjustment_db": round(lufs_diff, 1)
            })

        return json.dumps({
            "status": "success",
            "action": "calibrate_gain_staging",
            "tracks_adjusted": len(adjustments),
            "adjustments": adjustments,
            "target_profile": "club" if target_lufs == -8.0 else "streaming" if target_lufs == -14.0 else "auto",
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def run_mix_quality_check(ctx: Context) -> str:
    """
    T085: Run a full mix quality check.

    Verifies:
    - Integrated master LUFS
    - True peak (dBTP)
    - L/R RMS balance
    - Mono correlation
    - Headroom

    Returns JSON with the metrics and issue flags.
    """
    try:
        ableton = get_ableton_connection()

        # Master channel info (falls back to an empty dict on bad responses).
        master_info = ableton.send_command("get_master_info")
        if not isinstance(master_info, dict):
            master_info = {}

        # Simulated defaults stand in where real audio analysis would report.
        metrics = {
            "lufs_integrated": master_info.get("lufs", -12.0),
            "true_peak_db": master_info.get("true_peak", -0.5),
            "rms_left": master_info.get("rms_left", -15.0),
            "rms_right": master_info.get("rms_right", -15.2),
            "correlation": master_info.get("correlation", 0.95),
            "headroom_db": master_info.get("headroom", 6.0)
        }

        issues = []

        def flag(issue_type, severity, message, suggestion):
            # Collect one issue record.
            issues.append({
                "type": issue_type,
                "severity": severity,
                "message": message,
                "suggestion": suggestion
            })

        # LUFS check.
        lufs = metrics["lufs_integrated"]
        if lufs > -8.0:
            flag("lufs_too_high", "warning",
                 f"LUFS {lufs:.1f} too high for streaming",
                 "Reduce master gain or increase limiting")
        elif lufs < -16.0:
            flag("lufs_too_low", "info",
                 f"LUFS {lufs:.1f} very low",
                 "Consider increasing gain for club play")

        # True peak check.
        if metrics["true_peak_db"] > -1.0:
            flag("true_peak", "error",
                 f"True peak {metrics['true_peak_db']:.1f} dBTP too high",
                 "Lower limiter ceiling to -1.0 dBTP")

        # L/R balance check.
        rms_diff = abs(metrics["rms_left"] - metrics["rms_right"])
        if rms_diff > 3.0:
            flag("lr_imbalance", "warning",
                 f"L/R imbalance: {rms_diff:.1f} dB",
                 "Check panning and stereo width")

        # Correlation check (mono compatibility).
        if metrics["correlation"] < 0.5:
            flag("mono_compatibility", "warning",
                 f"Correlation {metrics['correlation']:.2f} - poor mono compatibility",
                 "Check phase issues in stereo widening")

        # Headroom check.
        if metrics["headroom_db"] < 3.0:
            flag("low_headroom", "error",
                 f"Headroom only {metrics['headroom_db']:.1f} dB",
                 "Reduce track gains to achieve >6dB headroom")

        # Derive an overall score from issue severity counts.
        errors = sum(1 for issue in issues if issue["severity"] == "error")
        warnings = sum(1 for issue in issues if issue["severity"] == "warning")

        if errors > 0:
            score = "fail"
        elif warnings > 2:
            score = "pass_with_warnings"
        elif warnings > 0:
            score = "good"
        else:
            score = "excellent"

        return json.dumps({
            "status": "success",
            "action": "run_mix_quality_check",
            "score": score,
            "metrics": metrics,
            "issues": issues,
            "errors": errors,
            "warnings": warnings,
            "passes": errors == 0,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def export_stem_mixdown(ctx: Context, output_dir: str = None,
                        bus_names: str = None,
                        include_metadata: bool = True) -> str:
    """
    T087: Export 24-bit/44.1kHz stems split by bus.

    Args:
        output_dir: Output directory (default: ~/AbletonMCP_Exports/)
        bus_names: Comma-separated buses to export (drums,bass,music,master)
        include_metadata: Embed BPM/key metadata in the files

    Fixes: the output directory is now created even when the caller supplies
    a custom path (previously os.makedirs only ran for the default path);
    the redundant local `import os` (shadowing the module-level import) was
    removed.
    """
    try:
        from audio_mastering import StemExporter
        from datetime import datetime

        # Buses to export.
        if bus_names is None:
            buses = ["drums", "bass", "music", "vocals", "fx", "master"]
        else:
            buses = [b.strip() for b in bus_names.split(",")]

        # Resolve the output directory and make sure it exists.
        if output_dir is None:
            output_dir = os.path.expanduser("~/AbletonMCP_Exports")
        os.makedirs(output_dir, exist_ok=True)

        # Optional BPM/key metadata from the live set.
        metadata = None
        if include_metadata:
            conn = get_ableton_connection()
            set_info = conn.send_command("get_set_info")
            if isinstance(set_info, dict):
                metadata = {
                    "bpm": set_info.get("tempo", 128),
                    "key": set_info.get("key", "Am"),
                    "genre": set_info.get("genre", "Tech House"),
                    "export_date": datetime.now().isoformat()
                }

        # Export the stems.
        result = StemExporter.export_stem_mixdown(
            output_dir=output_dir,
            bus_names=buses,
            metadata=metadata
        )

        return json.dumps({
            "status": "success",
            "action": "export_stem_mixdown",
            "output_dir": output_dir,
            "total_stems": result.get("total_stems", 0),
            "exported_files": result.get("exported_files", {}),
            "timestamp": result.get("timestamp", datetime.now().strftime("%Y%m%d_%H%M%S")),
            "format": "WAV 24-bit/44.1kHz"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def reset_diversity_memory(ctx: Context) -> str:
    """
    Reset cross-generation diversity memory.

    Useful for starting a fresh session without influence from prior
    generations.
    """
    cleared = []

    # Sample cross-generation memory.
    if reset_cross_generation_memory is not None:
        reset_cross_generation_memory()
        cleared.append("sample_memory_reset")

    # Reference listener memory.
    ref_listener = get_reference_listener()
    if ref_listener is not None and hasattr(ref_listener, "reset_cross_generation_tracking"):
        ref_listener.reset_cross_generation_tracking()
        cleared.append("reference_memory_reset")

    # MIDI pattern variant memory (module may be absent).
    try:
        from song_generator import reset_pattern_variant_memory
        reset_pattern_variant_memory()
        cleared.append("pattern_variant_memory_reset")
    except ImportError:
        pass

    logger.info("Cross-generation diversity memory reset: %s", ", ".join(cleared))
    return json.dumps({
        "status": "reset",
        "components": cleared,
        "timestamp": time.time()
    }, indent=2)
|
|
|
|
|
|
@mcp.tool()
def arrange_song_structure(ctx: Context, structure: str = "standard", exact: bool = False) -> str:
    """
    Create or rename scenes following a musical structure preset useful for
    production.
    """
    try:
        ableton = get_ableton_connection()
        preset = SONG_STRUCTURE_PRESETS.get(structure.lower(), SONG_STRUCTURE_PRESETS["standard"])

        session_response = ableton.send_command("get_session_info")
        if _is_error_response(session_response):
            return f"Error: {session_response.get('message')}"

        scene_count = session_response.get("result", {}).get("num_scenes", 0)

        # Grow the session until there is one scene per section.
        while scene_count < len(preset):
            created = ableton.send_command("create_scene", {"index": -1})
            if _is_error_response(created):
                return f"Error creando scenes: {created.get('message')}"
            scene_count += 1

        # In exact mode, trim surplus scenes from the end.
        while exact and scene_count > len(preset):
            deleted = ableton.send_command("delete_scene", {"index": scene_count - 1})
            if _is_error_response(deleted):
                return f"Error recortando scenes: {deleted.get('message')}"
            scene_count -= 1

        for idx, (section_name, bars, color) in enumerate(preset):
            label = f"{section_name} [{bars} bars]"

            renamed = ableton.send_command("set_scene_name", {
                "scene_index": idx,
                "name": label
            })
            if _is_error_response(renamed):
                return f"Error nombrando scene {idx}: {renamed.get('message')}"

            # Color failures are non-fatal: the response is intentionally unchecked.
            ableton.send_command("set_scene_color", {
                "scene_index": idx,
                "color": color
            })

        lines = [f"Estructura '{structure}' aplicada ({len(preset)} scenes):"]
        lines.extend(
            f"{idx}. {section_name} [{bars} bars]"
            for idx, (section_name, bars, _) in enumerate(preset)
        )
        return "\n".join(lines)

    except Exception as e:
        return f"Error organizando estructura: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def search_samples(ctx: Context, query: str, category: str = "", limit: int = 10) -> str:
    """
    Search for samples in the local library.

    Args:
        query: Search term (e.g., "kick", "bass", "hat")
        category: Category (kick, snare, hat, bass, synth, percussion, vocal)
        limit: Maximum number of results
    """
    try:
        if SampleIndex is None:
            return "✗ Error: Módulo sample_index no disponible"

        index = get_sample_index()
        matches = index.search(query, category, limit)

        if not matches:
            return f"No se encontraron samples para '{query}'"

        lines = [f"Samples encontrados para '{query}':\n"]
        for pos, entry in enumerate(matches, 1):
            lines.append(f"{pos}. {entry['name']} ({entry['category']})")
            lines.append(f" Path: {entry['path']}")
            if 'key' in entry:
                lines.append(f" Key: {entry['key']}, BPM: {entry.get('bpm', 'N/A')}")
            lines.append("")

        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error buscando samples: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def create_drum_pattern(
    ctx: Context,
    track_index: int,
    clip_index: int,
    style: str = "techno",
    pattern_type: str = "full",
    length: float = 4.0
) -> str:
    """
    Create a predefined drum pattern.

    Args:
        track_index: MIDI track index where the pattern is written
        clip_index: Clip/slot index
        style: Style (techno, house, trance, minimal)
        pattern_type: Pattern type (full, kick-only, hats-only, minimal)
        length: Duration in beats

    Notes:
        - Creates the clip automatically when it does not exist
        - Uses standard MIDI drum notes (C1=Kick, D1=Snare, F#1=CH, A#1=OH)
    """
    try:
        if SongGenerator is None:
            return "✗ Error: Módulo song_generator no disponible"

        notes = get_song_generator().create_drum_pattern(style, pattern_type, length)

        ableton = get_ableton_connection()

        note_payload = {
            "track_index": track_index,
            "clip_index": clip_index,
            "notes": notes
        }

        # First attempt: write straight into an existing clip.
        response = ableton.send_command("add_notes_to_clip", note_payload)

        # On failure the clip probably does not exist yet: create it and retry.
        if _is_error_response(response):
            ableton.send_command("create_clip", {
                "track_index": track_index,
                "clip_index": clip_index,
                "length": length
            })
            response = ableton.send_command("add_notes_to_clip", note_payload)

        if response.get("status") == "success":
            return f"✓ Patrón de batería '{style}' creado ({len(notes)} notas)"
        return f"✗ Error: {response.get('message')}"

    except Exception as e:
        return f"✗ Error creando patrón: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def create_bassline(
    ctx: Context,
    track_index: int,
    clip_index: int,
    key: str,
    style: str = "rolling",
    length: float = 4.0
) -> str:
    """
    Create a musical bassline.

    Args:
        track_index: MIDI track index
        clip_index: Clip index
        key: Key (e.g., "Am", "F#m", "C")
        style: Style (rolling, minimal, acid, walking, offbeat)
        length: Duration in beats
    """
    try:
        if SongGenerator is None:
            return "✗ Error: Módulo song_generator no disponible"

        notes = get_song_generator().create_bassline(key, style, length)

        ableton = get_ableton_connection()

        # Create the clip first (failures surface on the note write below).
        ableton.send_command("create_clip", {
            "track_index": track_index,
            "clip_index": clip_index,
            "length": length
        })

        # Write the generated notes into the clip.
        response = ableton.send_command("add_notes_to_clip", {
            "track_index": track_index,
            "clip_index": clip_index,
            "notes": notes
        })

        if response.get("status") == "success":
            return f"✓ Bassline '{style}' en {key} creado ({len(notes)} notas)"
        return f"✗ Error: {response.get('message')}"

    except Exception as e:
        return f"✗ Error creando bassline: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def create_chord_progression(
    ctx: Context,
    track_index: int,
    clip_index: int,
    key: str,
    progression_type: str = "techno",
    length: float = 16.0
) -> str:
    """
    Create a chord progression inside a MIDI clip.

    Args:
        track_index: MIDI track index
        clip_index: Clip slot index
        key: Musical key (e.g. "Am", "F#m", "C")
        progression_type: Progression flavor (techno, house, deep, minor)
        length: Length in beats (usually 16 = 4 bars)
    """
    try:
        if SongGenerator is None:
            return "✗ Error: Módulo song_generator no disponible"

        chord_notes = get_song_generator().create_chord_progression(key, progression_type, length)
        conn = get_ableton_connection()

        clip_target = {"track_index": track_index, "clip_index": clip_index}

        # Create the clip, then write the generated notes into it.
        conn.send_command("create_clip", dict(clip_target, length=length))
        result = conn.send_command("add_notes_to_clip", dict(clip_target, notes=chord_notes))

        if result.get("status") != "success":
            return f"✗ Error: {result.get('message')}"
        return f"✓ Progresión '{progression_type}' en {key} creada ({len(chord_notes)} notas)"

    except Exception as e:
        return f"✗ Error creando progresión: {str(e)}"
|
|
|
|
|
|
# ============================================================================
|
|
# HERRAMIENTAS MCP - Sistema Avanzado de Samples
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def scan_sample_library(
    ctx: Context,
    analyze_audio: bool = False
) -> str:
    """
    Scan the whole sample library.

    Args:
        analyze_audio: Also analyze audio content (slower but more accurate)

    Returns:
        Human-readable scan statistics (processed/added/updated/errors/total)
    """
    try:
        manager = get_sample_manager()
        if not manager:
            return "✗ Error: Sistema de samples no disponible"

        def progress(current, total, filename):
            # BUGFIX: the previous version logged the literal "(unknown)"
            # and discarded the filename argument entirely. Log the file
            # being scanned, using lazy %-style args for the hot path.
            pct = (current / total) * 100 if total > 0 else 0
            logger.info("Escaneando: %.1f%% - %s", pct, filename)

        stats = manager.scan_directory(analyze_audio=analyze_audio, progress_callback=progress)

        return f"""✓ Escaneo completado:
- Procesados: {stats['processed']}
- Agregados: {stats['added']}
- Actualizados: {stats['updated']}
- Errores: {stats['errors']}
- Total en librería: {stats['total_samples']}"""

    except Exception as e:
        return f"✗ Error escaneando librería: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def get_sample_library_stats(ctx: Context) -> str:
    """Return detailed statistics about the sample library."""
    try:
        manager = get_sample_manager()
        if not manager:
            return "✗ Error: Sistema de samples no disponible"

        stats = manager.get_stats()

        lines = ["📊 Estadísticas de la Librería de Samples", "=" * 50]
        lines.append(f"Total samples: {stats['total_samples']}")
        lines.append(f"Tamaño total: {stats['total_size'] / (1024**2):.1f} MB")
        lines.append(f"Último escaneo: {stats['last_scan'] or 'Nunca'}")

        # Per-category and per-key breakdowns, most frequent first.
        if stats['by_category']:
            lines.append("\nPor categoría:")
            for cat, count in sorted(stats['by_category'].items(), key=lambda x: -x[1]):
                lines.append(f" {cat}: {count}")

        if stats['by_key']:
            lines.append("\nPor key:")
            for key, count in sorted(stats['by_key'].items(), key=lambda x: -x[1]):
                lines.append(f" {key}: {count}")

        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error obteniendo estadísticas: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def advanced_search_samples(
    ctx: Context,
    query: str = "",
    category: str = "",
    sample_type: str = "",
    key: str = "",
    bpm: float = 0,
    bpm_tolerance: int = 5,
    genres: str = "",
    tags: str = "",
    limit: int = 20
) -> str:
    """
    Advanced sample search with multiple filters.

    Args:
        query: Search term matched against the sample name
        category: Category (drums, bass, synths, vocals, loops, one_shots)
        sample_type: Specific type (kick, snare, bass, lead, pad, ...)
        key: Musical key (Am, F#m, C, ...)
        bpm: Target BPM (0 = ignore)
        bpm_tolerance: BPM tolerance (+/-)
        genres: Comma-separated genres (techno, house, deep-house)
        tags: Comma-separated tags
        limit: Maximum number of results

    Examples:
        - advanced_search_samples(category="drums", sample_type="kick")
        - advanced_search_samples(key="Am", bpm=128, genres="techno,house")
        - advanced_search_samples(query="punchy", category="drums")
    """
    def _split_csv(raw: str) -> List[str]:
        # Split a comma-separated string, dropping blank entries.
        return [part.strip() for part in raw.split(",") if part.strip()]

    try:
        manager = get_sample_manager()
        if not manager:
            return "✗ Error: Sistema de samples no disponible"

        results = manager.search(
            query=query,
            category=category,
            sample_type=sample_type,
            key=key,
            bpm=bpm if bpm > 0 else None,
            bpm_tolerance=bpm_tolerance,
            genres=_split_csv(genres) if genres else None,
            tags=_split_csv(tags) if tags else None,
            limit=limit
        )

        if not results:
            return "No se encontraron samples con esos criterios."

        lines = [f"🔍 Resultados ({len(results)}):\n"]

        for idx, sample in enumerate(results, 1):
            lines.append(f"{idx}. {sample.name}")
            lines.append(f" Tipo: {sample.category}/{sample.sample_type}")
            details = []
            if sample.key:
                details.append(f"Key: {sample.key}")
            if sample.bpm:
                details.append(f"BPM: {sample.bpm:.1f}")
            if sample.genres:
                details.append(f"Géneros: {', '.join(sample.genres[:3])}")
            if details:
                lines.append(f" {' | '.join(details)}")
            lines.append(f" Path: {sample.path}")
            lines.append("")

        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error en búsqueda: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def select_samples_for_genre(
    ctx: Context,
    genre: str,
    key: str = "",
    bpm: float = 0
) -> str:
    """
    Automatically select samples for a musical genre.

    Args:
        genre: Genre (techno, house, tech-house, deep-house, trance, drum-and-bass, ...)
        key: Preferred key (auto-selected when empty)
        bpm: Preferred BPM (auto-selected when 0)

    Returns:
        A complete, organized sample pack description
    """
    try:
        selector = get_sample_selector()
        if not selector:
            return "✗ Error: Selector de samples no disponible"

        group = selector.select_for_genre(genre, key or None, bpm if bpm > 0 else None)

        lines = [f"🎵 Pack de Samples: {group.genre}", "=" * 50]
        lines.append(f"Key: {group.key} | BPM: {group.bpm}")
        lines.append("")

        # Drum kit pieces, listed only when populated.
        lines.append("🥁 Drum Kit:")
        kit = group.drums
        for label, piece in (
            ("Kick", kit.kick),
            ("Snare", kit.snare),
            ("Clap", kit.clap),
            ("Hat Closed", kit.hat_closed),
            ("Hat Open", kit.hat_open),
        ):
            if piece:
                lines.append(f" {label}: {piece.name}")

        # Bass samples (first three, with key when known).
        if group.bass:
            lines.append(f"\n🎸 Bass ({len(group.bass)} samples):")
            for s in group.bass[:3]:
                suffix = f" [{s.key}]" if s.key else ""
                lines.append(f" - {s.name}{suffix}")

        # Synth samples (first three, with key when known).
        if group.synths:
            lines.append(f"\n🎹 Synths ({len(group.synths)} samples):")
            for s in group.synths[:3]:
                suffix = f" [{s.key}]" if s.key else ""
                lines.append(f" - {s.name}{suffix}")

        # FX samples (first two).
        if group.fx:
            lines.append(f"\n✨ FX ({len(group.fx)} samples):")
            for s in group.fx[:2]:
                lines.append(f" - {s.name}")

        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error seleccionando samples: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def get_drum_kit_mapping(
    ctx: Context,
    genre: str = "techno",
    variation: str = "standard"
) -> str:
    """
    Fetch a drum kit together with its full MIDI mapping.

    Args:
        genre: Musical genre
        variation: Style variation (standard, heavy, minimal, ...)

    Returns:
        Kit information plus MIDI-note and Drum Rack pad mapping
    """
    # Human-readable labels for the common GM-style drum notes.
    note_labels = {
        36: "C1 (Kick)",
        38: "D1 (Snare)",
        39: "D#1 (Clap)",
        42: "F#1 (Closed Hat)",
        46: "A#1 (Open Hat)",
        41: "F1 (Tom Low)",
        47: "B1 (Tom Mid)",
        49: "C#2 (Crash)",
        51: "D#2 (Ride)",
    }

    try:
        selector = get_sample_selector()
        if not selector:
            return "✗ Error: Selector no disponible"

        kit = selector._select_drum_kit(genre, variation)
        mapping = selector.get_midi_mapping_for_kit(kit)

        lines = [f"🥁 Drum Kit: {kit.name}", "=" * 50]

        lines.append("\nMapeo MIDI:")
        lines.append("-" * 30)
        for note, info in sorted(mapping['notes'].items()):
            label = note_labels.get(note, f"Note {note}")
            lines.append(f"{label}: {info['sample'] or '(vacío)'}")

        lines.append("\nPara Drum Rack (pads 0-15):")
        lines.append("-" * 30)
        for slot, info in sorted(mapping['drum_rack_slots'].items()):
            lines.append(f"Pad {slot:2d} (Note {info['note']}): {info['sample'] or '(vacío)'}")

        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def analyze_audio_file(
    ctx: Context,
    file_path: str
) -> str:
    """
    Analyze an audio file and extract its characteristics.

    Args:
        file_path: Full path to the audio file

    Returns:
        Report with BPM, key (+confidence), duration, sample rate,
        detected sample type, percussive/harmonic flags and suggested genres.
    """
    try:
        if analyze_sample is None:
            # CONSISTENCY FIX: every other tool error starts with "✗ ";
            # this one previously returned a bare "Error: ..." message.
            return "✗ Error: Analizador de audio no disponible"

        if not os.path.exists(file_path):
            return f"✗ Archivo no encontrado: {file_path}"

        result = analyze_sample(file_path)

        output = ["🔊 Análisis de Audio", "=" * 50]
        output.append(f"Archivo: {os.path.basename(file_path)}")
        output.append("")
        output.append(f"BPM: {result.get('bpm') or 'No detectado'}")
        output.append(f"Key: {result.get('key') or 'No detectado'} " +
                      f"(confianza: {result.get('key_confidence', 0):.2f})")
        output.append(f"Duración: {result.get('duration', 0):.2f}s")
        output.append(f"Sample Rate: {result.get('sample_rate', 0)} Hz")
        output.append(f"Tipo detectado: {result.get('sample_type', 'unknown')}")
        output.append("")
        output.append(f"Es percusivo: {result.get('is_percussive', False)}")
        output.append(f"Es armónico: {result.get('is_harmonic', False)}")
        output.append("")

        genres = result.get('suggested_genres', [])
        if genres:
            output.append(f"Géneros sugeridos: {', '.join(genres)}")

        return "\n".join(output)

    except Exception as e:
        return f"✗ Error analizando audio: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def find_compatible_samples(
    ctx: Context,
    sample_path: str,
    sample_type: str = "",
    max_results: int = 10
) -> str:
    """
    Find samples compatible with a reference sample.

    Args:
        sample_path: Path of the reference sample
        sample_type: Restrict results to this specific type
        max_results: Maximum number of results

    Returns:
        List of compatible samples with a compatibility score bar
    """
    try:
        selector = get_sample_selector()
        manager = get_sample_manager()

        if not selector or not manager:
            return "✗ Error: Sistema de samples no disponible"

        sample = manager.get_by_path(sample_path)
        if not sample:
            return f"✗ Sample no encontrado en la librería: {sample_path}"

        compatible = selector.find_compatible_samples(
            sample,
            sample_type=sample_type,
            max_results=max_results
        )

        if not compatible:
            return "No se encontraron samples compatibles."

        lines = [f"🔍 Samples compatibles con: {sample.name}", "=" * 50]
        lines.append(f"Key: {sample.key or 'N/A'} | BPM: {sample.bpm or 'N/A'}")
        lines.append("")

        for idx, (candidate, score) in enumerate(compatible, 1):
            # Render the score as a 20-character progress bar.
            filled = int(score * 20)
            bar = "█" * filled + "░" * (20 - filled)
            lines.append(f"{idx}. {candidate.name}")
            lines.append(f" Compatibilidad: [{bar}] {score:.1%}")
            extras = []
            if candidate.key:
                extras.append(f"Key: {candidate.key}")
            if candidate.bpm:
                extras.append(f"BPM: {candidate.bpm:.1f}")
            if extras:
                lines.append(f" {' | '.join(extras)}")
            lines.append("")

        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def suggest_key_change(
    ctx: Context,
    current_key: str,
    direction: str = "fifth_up"
) -> str:
    """
    Suggest harmonic key changes.

    Args:
        current_key: Current key (e.g. "Am", "F#m", "C")
        direction: Kind of change:
            - fifth_up: fifth up (more energy)
            - fifth_down: fifth down (softer)
            - relative: switch to relative major/minor
            - parallel: switch between parallel major/minor

    Returns:
        The suggested key plus a short explanation
    """
    try:
        selector = get_sample_selector()
        if not selector:
            return "✗ Error: Selector no disponible"

        new_key = selector.suggest_key_change(current_key, direction)

        # Fall back to the raw direction string for unknown directions.
        reason = {
            "fifth_up": "Quinta arriba - Añade tensión y energía",
            "fifth_down": "Quinta abajo - Más suave, resolutivo",
            "relative": "Relativo mayor/menor - Cambio de modo, misma armadura",
            "parallel": "Paralelo mayor/menor - Mismo root, diferente modo"
        }.get(direction, direction)

        return (
            "🎵 Cambio de Key Sugerido:\n"
            "\n"
            f"Original: {current_key}\n"
            f"Sugerida: {new_key}\n"
            f"Tipo: {reason}\n"
            "\n"
            "Estos cambios son armónicamente coherentes y funcionan bien en transiciones de tracks."
        )

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
@mcp.tool()
def get_sample_pack_for_project(
    ctx: Context,
    genre: str,
    key: str = "",
    bpm: float = 0
) -> str:
    """
    Get a complete sample pack for a project.

    Args:
        genre: Musical genre
        key: Key (auto-detected when empty)
        bpm: BPM (auto-detected when 0)

    Returns:
        A full pack listing with every required element
    """
    try:
        manager = get_sample_manager()
        if not manager:
            return "✗ Error: Sistema de samples no disponible"

        bpm_val = bpm if bpm > 0 else None
        pack = manager.get_pack_for_genre(genre, key, bpm_val)

        lines = [f"📦 Sample Pack: {genre.title()}", "=" * 50]
        if key:
            lines.append(f"Key: {key}")
        if bpm_val:
            lines.append(f"BPM: {bpm}")
        lines.append("")

        total = 0
        for category, samples in pack.items():
            if not samples:
                continue
            total += len(samples)
            lines.append(f"{category.replace('_', ' ').title()}: {len(samples)} samples")
            # Show only the first two samples per category.
            for s in samples[:2]:
                key_info = f" [{s.key}]" if s.key else ""
                bpm_info = f" {s.bpm:.0f}BPM" if s.bpm else ""
                lines.append(f" - {s.name}{key_info}{bpm_info}")
            if len(samples) > 2:
                lines.append(f" ... y {len(samples) - 2} más")
            lines.append("")

        lines.append(f"Total: {total} samples")
        return "\n".join(lines)

    except Exception as e:
        return f"✗ Error: {str(e)}"
|
|
|
|
|
|
|
|
# ============================================================================
# HERRAMIENTAS MCP - QA Validation (Phase 7)
# ============================================================================

# Constants for QA validation

# Track-name prefixes identifying audio layers resampled from the reference
# track; validate_audio_layers scans for these.
QA_AUDIO_RESAMPLE_TRACK_PREFIXES = (
    "AUDIO RESAMPLE REVERSE FX",
    "AUDIO RESAMPLE RISER",
    "AUDIO RESAMPLE DOWNLIFTER",
    "AUDIO RESAMPLE STUTTER",
)

# Bus keys a generated set is expected to expose.
QA_EXPECTED_BUS_KEYS = ("drums", "bass", "music", "vocal", "fx")

# Fader values (0..1) outside this band are flagged as problematic.
QA_PROBLEMATIC_VOLUME_THRESHOLD_LOW = 0.3
QA_PROBLEMATIC_VOLUME_THRESHOLD_HIGH = 0.95
# A track with this many (or fewer) arrangement clips counts as empty.
QA_EMPTY_CLIP_DETECTION_THRESHOLD = 0
# Output-routing names that count as a valid "main" destination.
QA_VALID_MAIN_ROUTING_NAMES = {"MAIN", "MASTER", "EXT. OUT", "SENDS ONLY"}

# Minimum notes for a MIDI clip to be considered non-empty, and how many
# empty MIDI clips trigger a warning.
QA_MIN_NOTES_PER_CLIP = 1
QA_MAX_EMPTY_MIDI_CLIPS_WARNING = 3

# Role -> set of acceptable track names for the critical layers a finished
# song must contain (both MIDI and AUDIO variants accepted).
QA_CRITICAL_TRACK_ROLES = {
    "kick": {"KICK", "AUDIO KICK"},
    "bass": {"BASS", "SUB BASS", "AUDIO BASS", "AUDIO BASS LOOP"},
    "clap": {"CLAP", "SNARE", "AUDIO CLAP"},
    "hat": {"HAT", "HAT CLOSED", "HAT OPEN", "AUDIO HAT"},
    "lead": {"LEAD", "SYNTH PEAK", "AUDIO SYNTH PEAK"},
    "chords": {"CHORDS", "SYNTH LOOP", "AUDIO SYNTH LOOP"},
    "atmos": {"ATMOS", "DRONE", "PAD", "AUDIO ATMOS"},
}

# Thresholds used to decide whether a set is ready to export.
QA_EXPORT_READINESS_CHECKS = {
    "master_volume_range": (0.75, 0.95),
    "master_has_limiter": True,
    "min_track_count": 6,
    "min_bus_count": 3,
    "max_clipping_tracks": 0,
    "min_return_tracks": 2,
    "min_audio_layers": 2,
    "max_empty_tracks_ratio": 0.3,
}

# Issue key -> human-readable fix plus (optionally) the MCP command that can
# apply it automatically. `mcp_command` is None when no automated fix exists.
QA_ACTIONABLE_FIXES = {
    "empty_midi_clip": {
        "fix": "Double-click the clip to open the piano roll and add notes, or delete the empty clip",
        "mcp_command": None,
    },
    "bus_no_input": {
        "fix": "Route tracks to this bus: select track(s) and set Output Routing to this bus",
        "mcp_command": "set_track_routing",
    },
    "return_no_sends": {
        "fix": "Add send levels to this return: select track and adjust Send A/B/C to desired level",
        "mcp_command": "set_track_send",
    },
    "missing_critical_layer": {
        "fix": "Regenerate the track or manually add a {role} layer (MIDI or Audio)",
        "mcp_command": "generate_track",
    },
    "missing_resample_layer": {
        "fix": "Run audio resampling on the reference track, or check if reference analysis completed",
        "mcp_command": None,
    },
    "clipping_track": {
        "fix": "Reduce track volume by 3-6dB and use a limiter on the master",
        "mcp_command": "set_track_volume",
    },
    "master_too_low": {
        "fix": "Increase master volume to 0.85 for proper export level",
        "mcp_command": "set_track_volume",
    },
    "master_too_high": {
        "fix": "Reduce master volume to 0.85 to prevent clipping on export",
        "mcp_command": "set_track_volume",
    },
    "no_returns": {
        "fix": "Create return tracks for reverb (Space) and delay (Echo) effects",
        "mcp_command": None,
    },
    "insufficient_buses": {
        "fix": "Create buses for drums, bass, music to enable proper mixing",
        "mcp_command": "create_bus",
    },
}

# Resampled-FX track name -> its musical role, target bus, and the song
# sections where that layer is expected to appear.
QA_DERIVED_FX_ROLE_MAP = {
    "AUDIO RESAMPLE REVERSE FX": {"role": "reverse_fx", "bus": "fx", "expected_in_sections": ["build", "break"]},
    "AUDIO RESAMPLE RISER": {"role": "riser", "bus": "fx", "expected_in_sections": ["build", "intro"]},
    "AUDIO RESAMPLE DOWNLIFTER": {"role": "downlifter", "bus": "fx", "expected_in_sections": ["drop", "break"]},
    "AUDIO RESAMPLE STUTTER": {"role": "stutter", "bus": "vocal", "expected_in_sections": ["break", "drop"]},
}

# Conventional return-track names with their send keys and the devices
# typically loaded on them (used to sanity-check return configuration).
QA_COMMON_RETURN_NAMES = {
    "SPACE": {"sends": ["space"], "typical_devices": ["Hybrid Reverb", "Reverb", "Convolution"]},
    "ECHO": {"sends": ["echo"], "typical_devices": ["Echo", "Delay", "Ping Pong"]},
    "HEAT": {"sends": ["heat"], "typical_devices": ["Saturator", "Distortion"]},
    "GLUE": {"sends": ["glue"], "typical_devices": ["Glue Compressor", "Compressor"]},
    "REVERB": {"sends": ["reverb"], "typical_devices": ["Hybrid Reverb", "Reverb"]},
    "DELAY": {"sends": ["delay"], "typical_devices": ["Echo", "Delay"]},
}
|
|
|
|
|
|
def _extract_bus_payload(response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Extract the bus list from a list_buses response; empty list on error."""
    if _is_error_response(response):
        return []
    payload = response.get("result", {})
    # The bridge may return either a bare list or {"buses": [...]}.
    if isinstance(payload, list):
        return payload
    if isinstance(payload, dict):
        return list(payload.get("buses", []) or [])
    return []
|
|
|
|
|
|
def _track_arrangement_clip_count(track: Dict[str, Any]) -> int:
|
|
try:
|
|
return int(track.get("arrangement_clip_count", 0) or 0)
|
|
except Exception:
|
|
return 0
|
|
|
|
|
|
def _is_utility_track_name(track_name: str) -> bool:
    """True for helper tracks (guides, SC triggers, reference tracks) QA ignores."""
    name = _normalize_track_name(track_name)
    if not name:
        return True
    return "GUIDE" in name or name.startswith(("SC TRIGGER", "REFERENCE "))
|
|
|
|
|
|
def _expected_audio_replacement_tracks() -> Set[str]:
    """Normalized names of every track targeted by the reference-audio mute map."""
    return {
        _normalize_track_name(name)
        for names in REFERENCE_AUDIO_MUTE_MAP.values()
        for name in names
    }
|
|
|
|
|
|
def _is_expected_replacement_mute(track_name: str) -> bool:
    """Whether a muted track is an expected replacement target (not an anomaly)."""
    return _normalize_track_name(track_name) in _expected_audio_replacement_tracks()
|
|
|
|
|
|
def _find_audio_replacement_sources(track_name: str) -> List[str]:
    """Audio tracks that replace *track_name* according to REFERENCE_AUDIO_MUTE_MAP."""
    wanted = _normalize_track_name(track_name)
    matches: List[str] = []
    for audio_track, target_names in REFERENCE_AUDIO_MUTE_MAP.items():
        normalized_targets = {_normalize_track_name(name) for name in target_names}
        if wanted not in normalized_targets:
            continue
        # Prefer the resolved template name; fall back to the raw map key.
        resolved = _match_audio_track_template(audio_track, REFERENCE_AUDIO_MUTE_MAP)
        matches.append(resolved or audio_track)
    return matches
|
|
|
|
|
|
def _build_bus_sender_map(tracks: List[Dict[str, Any]], buses: List[Dict[str, Any]]) -> Dict[str, List[str]]:
    """Map each bus name to the normalized names of tracks routed into it."""
    # Seed the map with every named bus so buses with no senders still appear.
    sender_map: Dict[str, List[str]] = {
        name: []
        for name in {
            _normalize_track_name(bus.get("name", ""))
            for bus in buses
            if isinstance(bus, dict)
        }
        if name
    }

    for track in tracks:
        if not isinstance(track, dict):
            continue
        source = _normalize_track_name(track.get("name", ""))
        target = _normalize_track_name(track.get("current_output_routing", ""))
        # Only record routes that land on a known bus; skip self-routed buses.
        if target in sender_map and source != target:
            sender_map[target].append(source)
    return sender_map
|
|
|
|
|
|
def _qa_log_issue(issues: List[Dict[str, Any]], severity: str, category: str, message: str, details: Optional[Dict[str, Any]] = None) -> None:
    """Append a QA issue record to *issues* and mirror it to the logger."""
    record: Dict[str, Any] = {
        "severity": severity,
        "category": category,
        "message": message,
        "timestamp": time.time(),
    }
    if details:
        record["details"] = details
    issues.append(record)

    # Warnings and errors are logged at WARNING; everything else at INFO.
    level = logging.WARNING if severity in ("warning", "error") else logging.INFO
    logger.log(level, f"[QA-{severity.upper()}] {category}: {message}")
|
|
|
|
|
|
@mcp.tool()
def validate_set(ctx: Context, check_routing: bool = True, check_gain: bool = True, check_clips: bool = True) -> str:
    """
    Validate the whole set, looking for common problems.

    Runs a fixed sequence of private validators (muted tracks, empty clips,
    returns, empty MIDI clips, routing, gain staging); each one appends its
    findings to a shared issue list that is then turned into a JSON report.

    Args:
        check_routing: Verify track routing
        check_gain: Verify gain-staging levels
        check_clips: Verify empty clips

    Returns:
        JSON report with the issues found
    """
    issues: List[Dict[str, Any]] = []
    ableton = get_ableton_connection()

    try:
        # Fetch track information
        tracks_response = ableton.send_command("get_tracks")
        if _is_error_response(tracks_response):
            return json.dumps({"error": tracks_response.get("message", "No se pudieron obtener tracks")})

        tracks = _extract_tracks_payload(tracks_response)

        # 1. Check unexpectedly muted tracks (always runs)
        _validate_muted_tracks(ableton, tracks, issues)

        # 2. Check empty clips
        if check_clips:
            _validate_empty_clips(ableton, tracks, issues)

        # 3. Check return tracks that serve no purpose (always runs)
        _validate_returns(ableton, issues)

        # 3.5. Check MIDI clips without notes (always runs)
        _validate_empty_midi_clips(ableton, tracks, issues)

        # 4. Check broken routing
        if check_routing:
            _validate_routing(ableton, tracks, issues)

        # 5. Check gain staging
        if check_gain:
            _validate_gain_staging(ableton, tracks, issues)

        # Build the final report from the accumulated issues
        report = _generate_qa_report(issues, "Set Validation")

        return json.dumps(report, indent=2)

    except Exception as e:
        logger.error(f"Error en validate_set: {e}")
        # Return whatever issues were collected before the failure.
        return json.dumps({"error": str(e), "issues": issues})
|
|
|
|
|
|
@mcp.tool()
def validate_audio_layers(ctx: Context, check_files: bool = True, check_positions: bool = True) -> str:
    """
    Specifically validate the AUDIO RESAMPLE tracks.

    Checks each AUDIO RESAMPLE track's bus routing (against
    AUDIO_TRACK_BUS_KEYS), its volume (against AUDIO_LAYER_MIX_PROFILES)
    and whether it has any arrangement clips.

    NOTE(review): check_files and check_positions are accepted but not
    referenced anywhere in the body — the file/position checks they
    describe appear to be unimplemented; confirm before relying on them.

    Args:
        check_files: Verify that the audio files exist
        check_positions: Verify that positions are valid

    Returns:
        JSON report with the problems found
    """
    issues: List[Dict[str, Any]] = []
    ableton = get_ableton_connection()

    try:
        # Fetch tracks
        tracks_response = ableton.send_command("get_tracks")
        if _is_error_response(tracks_response):
            return json.dumps({"error": tracks_response.get("message", "No se pudieron obtener tracks")})

        tracks = _extract_tracks_payload(tracks_response)

        # Keep only the AUDIO RESAMPLE tracks (matched by name prefix)
        resample_tracks = [
            track for track in tracks
            if isinstance(track, dict) and any(
                str(track.get("name", "")).strip().upper().startswith(prefix)
                for prefix in QA_AUDIO_RESAMPLE_TRACK_PREFIXES
            )
        ]

        if not resample_tracks:
            _qa_log_issue(issues, "info", "audio_layers", "No se encontraron tracks AUDIO RESAMPLE")
            report = _generate_qa_report(issues, "Audio Layers Validation")
            return json.dumps(report, indent=2)

        # Build a bus_key -> normalized bus name lookup for routing checks
        bus_response = ableton.send_command("list_buses")
        buses = _extract_bus_payload(bus_response)
        bus_name_by_key = {}
        for bus in buses:
            if not isinstance(bus, dict):
                continue
            bus_key = str(bus.get("bus_key", "") or "").strip().lower()
            bus_name = _normalize_track_name(bus.get("name", ""))
            if bus_key and bus_name:
                bus_name_by_key[bus_key] = bus_name

        # Validate each AUDIO RESAMPLE track
        for track in resample_tracks:
            track_index = int(track.get("index", -1))
            track_name = str(track.get("name", "UNKNOWN"))
            normalized_name = _normalize_track_name(track_name)
            template_name = _match_audio_track_template(normalized_name, AUDIO_TRACK_BUS_KEYS)

            # Check the track is routed to its expected bus (MAIN/MASTER
            # are also accepted as valid destinations)
            expected_bus = AUDIO_TRACK_BUS_KEYS.get(template_name) if template_name else None
            if expected_bus:
                try:
                    routing_response = ableton.send_command("get_track_routing", {"track_index": track_index})
                    if not _is_error_response(routing_response):
                        current_output = _normalize_track_name(routing_response.get("result", {}).get("current_output_routing", ""))
                        expected_bus_name = bus_name_by_key.get(expected_bus, expected_bus.upper())
                        if current_output not in {expected_bus_name, "MAIN", "MASTER"}:
                            _qa_log_issue(issues, "warning", "audio_layers_routing",
                                f"{track_name}: routing a '{current_output}' no coincide con bus esperado '{expected_bus_name}'",
                                {"track_index": track_index, "expected_bus": expected_bus_name, "current_routing": current_output})
                except Exception as e:
                    _qa_log_issue(issues, "warning", "audio_layers_routing",
                        f"{track_name}: error verificando routing: {e}")
            else:
                _qa_log_issue(issues, "info", "audio_layers_bus",
                    f"{track_name}: no tiene bus definido en AUDIO_TRACK_BUS_KEYS")

            # Check the volume against the mix profile (tolerance 0.2)
            profile_template = _match_audio_track_template(normalized_name, AUDIO_LAYER_MIX_PROFILES)
            mix_profile = AUDIO_LAYER_MIX_PROFILES.get(profile_template) if profile_template else None
            if mix_profile:
                expected_volume = float(mix_profile.get("volume", 0.7))
                try:
                    current_volume = float(track.get("volume", 0.7))
                    volume_diff = abs(current_volume - expected_volume)
                    if volume_diff > 0.2:
                        _qa_log_issue(issues, "warning", "audio_layers_volume",
                            f"{track_name}: volumen {current_volume:.2f} difiere significativamente del perfil {expected_volume:.2f}",
                            {"track_index": track_index, "current_volume": current_volume, "expected_volume": expected_volume})
                except Exception:
                    # Best-effort: a malformed volume value is not fatal
                    pass

            # Check the track actually has clips in the arrangement
            arrangement_clips = _track_arrangement_clip_count(track)
            if arrangement_clips <= QA_EMPTY_CLIP_DETECTION_THRESHOLD:
                _qa_log_issue(issues, "warning", "audio_layers_clips",
                    f"{track_name}: no tiene clips en arrangement",
                    {"track_index": track_index, "arrangement_clip_count": arrangement_clips})

        # Build the report
        report = _generate_qa_report(issues, "Audio Layers Validation")
        return json.dumps(report, indent=2)

    except Exception as e:
        logger.error(f"Error en validate_audio_layers: {e}")
        return json.dumps({"error": str(e), "issues": issues})
|
|
|
|
|
|
@mcp.tool()
def detect_common_issues(ctx: Context) -> str:
    """
    Detect frequent problems in the current set.

    Checks, in order: too many muted tracks, master volume out of range,
    extreme BPM, missing return tracks, generic track names, uncolored
    tracks, and a single soloed track. Some findings also append an
    actionable entry to the suggestions list.

    Returns:
        JSON with the list of detected problems and fix suggestions
    """
    issues: List[Dict[str, Any]] = []
    suggestions: List[Dict[str, Any]] = []
    ableton = get_ableton_connection()

    try:
        # Fetch general information about the set
        tracks_response = ableton.send_command("get_tracks")
        session_response = ableton.send_command("get_session_info")

        if _is_error_response(tracks_response) or _is_error_response(session_response):
            return json.dumps({"error": "No se pudo obtener informacion del set"})

        tracks = _extract_tracks_payload(tracks_response)
        session_info = session_response.get("result", {})

        # Detect: too many muted tracks (more than half)
        muted_count = sum(1 for t in tracks if isinstance(t, dict) and t.get("mute", False))
        total_tracks = len(tracks)
        if total_tracks > 0 and muted_count > total_tracks * 0.5:
            _qa_log_issue(issues, "warning", "common_issues",
                f"Demasiados tracks mudos: {muted_count}/{total_tracks} ({muted_count/total_tracks*100:.0f}%)",
                {"muted_count": muted_count, "total_tracks": total_tracks})
            suggestions.append({
                "issue": "too_many_muted",
                "suggestion": "Considera eliminar tracks mudos que no se usan o crear un preset de mute por seccion",
                "command": "unmute_all_except",
            })

        # Detect: master volume too high (clipping risk) or too low
        try:
            master_response = ableton.send_command("get_track_info", {"track_type": "master", "track_index": 0})
            if not _is_error_response(master_response):
                master_volume = float(master_response.get("result", {}).get("volume", 0.85))
                if master_volume > QA_PROBLEMATIC_VOLUME_THRESHOLD_HIGH:
                    _qa_log_issue(issues, "error", "common_issues",
                        f"Master volume muy alto: {master_volume:.2f} (riesgo de clipping)",
                        {"master_volume": master_volume})
                    suggestions.append({
                        "issue": "master_too_high",
                        "suggestion": "Reducir master a 0.85 (unity) o menos",
                        "command": "set_track_volume",
                        "params": {"track_type": "master", "track_index": 0, "volume": 0.85},
                    })
                elif master_volume < QA_PROBLEMATIC_VOLUME_THRESHOLD_LOW:
                    _qa_log_issue(issues, "warning", "common_issues",
                        f"Master volume muy bajo: {master_volume:.2f}",
                        {"master_volume": master_volume})
        except Exception:
            # Master info is best-effort; skip the check on failure
            pass

        # Detect: BPM outside the typical range
        bpm = float(session_info.get("tempo", 120))
        if bpm < 60 or bpm > 200:
            _qa_log_issue(issues, "warning", "common_issues",
                f"BPM fuera de rango tipico: {bpm}",
                {"bpm": bpm})

        # Detect: no return tracks configured
        num_returns = int(session_info.get("num_return_tracks", 0))
        if num_returns == 0:
            _qa_log_issue(issues, "info", "common_issues",
                "No hay return tracks configurados - considera agregar reverb/delay para mezcla")
            suggestions.append({
                "issue": "no_returns",
                "suggestion": "Crear returns para efectos comunes (reverb, delay)",
            })

        # Detect: tracks with generic/default names
        generic_names = 0
        for track in tracks:
            if isinstance(track, dict):
                name = str(track.get("name", "")).strip().lower()
                if not name or name in ("midi track", "audio track", "track", "new track"):
                    generic_names += 1
        if generic_names > 0:
            _qa_log_issue(issues, "info", "common_issues",
                f"{generic_names} tracks con nombres genericos",
                {"generic_names_count": generic_names})

        # Detect: tracks without an assigned color (color 0 or unset)
        uncolored = sum(1 for t in tracks if isinstance(t, dict) and int(t.get("color", 0)) == 0)
        if uncolored > 0:
            _qa_log_issue(issues, "info", "common_issues",
                f"{uncolored} tracks sin color asignado")

        # Detect: solo active on exactly one track (likely accidental)
        soloed = [t for t in tracks if isinstance(t, dict) and t.get("solo", False)]
        if len(soloed) == 1:
            _qa_log_issue(issues, "warning", "common_issues",
                f"Solo activo en un track: {soloed[0].get('name', 'UNKNOWN')} - posible error",
                {"soloed_track": soloed[0].get("name")})
            suggestions.append({
                "issue": "single_solo",
                "suggestion": "Desactivar solo o agregar mas tracks en solo",
            })

        # Build the report, attaching suggestions and session summary
        report = _generate_qa_report(issues, "Common Issues Detection")
        report["suggestions"] = suggestions
        report["session_info"] = {
            "bpm": bpm,
            "total_tracks": total_tracks,
            "muted_tracks": muted_count,
            "num_returns": num_returns,
        }

        return json.dumps(report, indent=2)

    except Exception as e:
        logger.error(f"Error en detect_common_issues: {e}")
        return json.dumps({"error": str(e), "issues": issues})
|
|
|
|
|
|
@mcp.tool()
def diagnose_generated_set(ctx: Context, sections: Optional[List[Dict[str, Any]]] = None) -> str:
    """
    Diagnose the generated set and return useful information.

    Analyzes the structure of the generated set and reports diagnostic data
    about tracks, buses, audio layers and possible mixing problems, plus an
    export-readiness verdict with actionable suggestions.

    Args:
        ctx: MCP tool context (not read by this body).
        sections: Optional list of sections for additional analysis.
            NOTE(review): currently unread by this implementation - confirm
            whether section-level analysis is still planned.

    Returns:
        JSON string of the `diagnosis` dict; on failure the same dict is
        returned with an extra "error" key so callers always get the shape.
    """
    # Result accumulator; every exit path (success, early error, exception)
    # serializes this dict.
    diagnosis = {
        "total_tracks": 0,"bus_count": 0,
        "return_count": 0,
        "audio_track_count": 0,
        "audio_resample_count": 0,
        "empty_arrangement_tracks": [],
        "muted_tracks": [],
        "muted_replaced_tracks": [],
        "unexpected_muted_tracks": [],
        "buses_without_signal": [],
        "buses_without_routes": [],
        "missing_critical_layers": [],
        "missing_derived_fx_layers": [],
        "derived_fx_layers_status": {},
        "mixing_warnings": [],
        "export_readiness": {"ready": True, "issues": []},
        "suggestions": [],
    }

    ableton = get_ableton_connection()

    try:
        # --- Gather raw session state from Live --------------------------
        tracks_response = ableton.send_command("get_tracks")
        if _is_error_response(tracks_response):
            return json.dumps({"error": tracks_response.get("message", "No se pudieron obtener tracks"), **diagnosis})

        tracks = _extract_tracks_payload(tracks_response)
        diagnosis["total_tracks"] = len(tracks)

        session_response = ableton.send_command("get_session_info")
        if not _is_error_response(session_response):
            diagnosis["return_count"] = int(session_response.get("result", {}).get("num_return_tracks", 0) or 0)

        bus_response = ableton.send_command("list_buses")
        buses = _extract_bus_payload(bus_response)
        diagnosis["bus_count"] = len(buses)
        bus_names = {_normalize_track_name(bus.get("name", "")) for bus in buses if isinstance(bus, dict)}
        bus_sender_map = _build_bus_sender_map(tracks, buses)

        # Master volume defaults to 0.85 when the query fails.
        master_volume = 0.85
        master_response = ableton.send_command("get_track_info", {"track_type": "master", "track_index": 0})
        if not _is_error_response(master_response):
            master_volume = float(master_response.get("result", {}).get("volume", 0.85))
        diagnosis["master_volume"] = master_volume

        # --- Per-track classification ------------------------------------
        found_critical_layers = {role: False for role in QA_CRITICAL_TRACK_ROLES}
        derived_fx_status = {prefix: {"found": False, "has_clips": False, "routed_correctly": False}
                             for prefix in QA_AUDIO_RESAMPLE_TRACK_PREFIXES}
        track_names_set = set()

        for track in tracks:
            if not isinstance(track, dict):
                continue

            name = _normalize_track_name(track.get("name", ""))
            track_index = int(track.get("index", -1))
            track_names_set.add(name)

            # Derived FX / resample tracks: record presence, clips and routing.
            is_audio_resample = False
            for prefix in QA_AUDIO_RESAMPLE_TRACK_PREFIXES:
                if name.startswith(_normalize_track_name(prefix)):
                    is_audio_resample = True
                    diagnosis["audio_resample_count"] += 1
                    derived_fx_status[prefix]["found"] = True
                    arrangement_clips = _track_arrangement_clip_count(track)
                    if arrangement_clips > 0:
                        derived_fx_status[prefix]["has_clips"] = True

                    expected_bus_info = QA_DERIVED_FX_ROLE_MAP.get(prefix, {})
                    expected_bus = expected_bus_info.get("bus", "fx")
                    current_routing = _normalize_track_name(track.get("current_output_routing", ""))
                    # Considered routed correctly if it points at a bus whose
                    # name matches the expected bus, or straight at main.
                    bus_match = any(bn in current_routing for bn in bus_names if expected_bus in bn.lower())
                    if bus_match or current_routing in QA_VALID_MAIN_ROUTING_NAMES:
                        derived_fx_status[prefix]["routed_correctly"] = True

            # Plain audio layers (non-resample) counted separately.
            if name.startswith("AUDIO ") and not is_audio_resample:
                diagnosis["audio_track_count"] += 1

            for role, role_names in QA_CRITICAL_TRACK_ROLES.items():
                if any(rn in name for rn in role_names):
                    found_critical_layers[role] = True

            # Muted tracks: replacement mutes are expected; anything else that
            # is not a utility track is flagged as unexpected.
            if track.get("mute", False):
                rendered_name = str(track.get("name", f"Track {track_index}"))
                diagnosis["muted_tracks"].append(rendered_name)
                if _is_expected_replacement_mute(rendered_name):
                    diagnosis["muted_replaced_tracks"].append(rendered_name)
                elif not _is_utility_track_name(rendered_name):
                    diagnosis["unexpected_muted_tracks"].append(rendered_name)

            # Tracks with (near-)zero arrangement clips, excluding buses and
            # utility tracks.
            if (_track_arrangement_clip_count(track) <= QA_EMPTY_CLIP_DETECTION_THRESHOLD
                    and name not in bus_names
                    and not _is_utility_track_name(name)):
                diagnosis["empty_arrangement_tracks"].append(str(track.get("name", f"Track {track_index}")))

        # --- Derived FX layer findings ------------------------------------
        diagnosis["derived_fx_layers_status"] = derived_fx_status
        for prefix, status in derived_fx_status.items():
            if not status["found"]:
                diagnosis["missing_derived_fx_layers"].append(prefix)
                fix_info = QA_ACTIONABLE_FIXES.get("missing_resample_layer", {})
                diagnosis["suggestions"].append(
                    f"Add {prefix} layer: {fix_info.get('fix', 'Check if audio resampling completed during generation')}"
                )
            elif not status["has_clips"]:
                diagnosis["mixing_warnings"].append(f"Derived FX track '{prefix}' exists but has no clips")
                diagnosis["suggestions"].append(f"Regenerate {prefix} audio or verify source audio for resampling")
            elif not status["routed_correctly"]:
                diagnosis["mixing_warnings"].append(f"Derived FX track '{prefix}' may have incorrect routing")
                expected_bus = QA_DERIVED_FX_ROLE_MAP.get(prefix, {}).get("bus", "FX")
                diagnosis["suggestions"].append(f"Route {prefix} to {expected_bus.upper()} bus for proper mixing")

        # --- Buses without any routed senders -----------------------------
        for bus in buses:
            bus_name = _normalize_track_name(bus.get("name", ""))
            senders = bus_sender_map.get(bus_name, [])
            if not senders:
                rendered_name = str(bus.get("name", ""))
                diagnosis["buses_without_signal"].append(rendered_name)
                diagnosis["buses_without_routes"].append(rendered_name)
                fix_info = QA_ACTIONABLE_FIXES.get("bus_no_input", {})
                # NOTE(review): this compares the normalized bus name against
                # lowercase values - if _normalize_track_name upper-cases names,
                # bus_key is always None and only the generic suggestion fires.
                bus_key = next((k for k, v in {"DRUMS": ["drums"], "BASS": ["bass"], "MUSIC": ["music"], "VOCAL": ["vocal"], "FX": ["fx"]}.items() if bus_name in v), None)
                expected_tracks = []
                if bus_key == "DRUMS":
                    expected_tracks = ["KICK", "CLAP", "HAT", "PERC"]
                elif bus_key == "BASS":
                    expected_tracks = ["BASS", "SUB BASS"]
                elif bus_key == "MUSIC":
                    expected_tracks = ["LEAD", "SYNTH", "CHORDS", "PAD"]
                elif bus_key == "VOCAL":
                    expected_tracks = ["VOCAL", "VOCAL CHOP"]
                elif bus_key == "FX":
                    expected_tracks = ["ATMOS", "RISER", "CRASH"]

                if expected_tracks:
                    diagnosis["suggestions"].append(
                        f"Route {', '.join(expected_tracks[:3])} tracks to {rendered_name} bus for proper mixing"
                    )
                else:
                    diagnosis["suggestions"].append(
                        f"Route tracks to {rendered_name} bus: {fix_info.get('fix', 'Set Output Routing on source tracks')}" )

        # --- Missing critical layers --------------------------------------
        for critical_name, alternatives in QA_CRITICAL_TRACK_ROLES.items():
            if not any(_normalize_track_name(option) in track_names_set for option in alternatives):
                if not found_critical_layers[critical_name]:
                    diagnosis["missing_critical_layers"].append({
                        "role": critical_name,
                        "suggested_track_names": list(alternatives)[:3],
                        "suggestion": f"Add {critical_name} layer (MIDI or Audio) for complete mix"
                    })

        # --- Soft mixing warnings (never block export by themselves) ------
        if diagnosis["bus_count"] < 3:
            diagnosis["mixing_warnings"].append(f"Low bus count: {diagnosis['bus_count']} (expected 3-5)")
        if diagnosis["audio_track_count"] == 0:
            diagnosis["mixing_warnings"].append("No AUDIO tracks found - set may not be properly generated")
            diagnosis["suggestions"].append("Run generate_track() to create audio layers")

        if diagnosis["audio_resample_count"] < 3:
            diagnosis["mixing_warnings"].append(f"Low RESAMPLE count: {diagnosis['audio_resample_count']} (expected 3-4)")
            diagnosis["suggestions"].append("Check if audio resampling completed during generation")

        if diagnosis["return_count"] < 2:
            diagnosis["mixing_warnings"].append(f"Low return count: {diagnosis['return_count']} (expected 2-4)")
            diagnosis["suggestions"].append("Add return tracks for reverb/delay effects")

        if diagnosis["unexpected_muted_tracks"]:
            diagnosis["mixing_warnings"].append(f"{len(diagnosis['unexpected_muted_tracks'])} unexpected muted tracks")
            diagnosis["suggestions"].append("Review muted tracks: " + ", ".join(diagnosis['unexpected_muted_tracks'][:3]))

        if diagnosis["empty_arrangement_tracks"]:
            diagnosis["mixing_warnings"].append(f"{len(diagnosis['empty_arrangement_tracks'])} tracks without arrangement clips")
            diagnosis["suggestions"].append("Check if Session-to-Arrangement commit completed")

        if diagnosis["buses_without_routes"]:
            diagnosis["mixing_warnings"].append(f"Buses without routed senders: {', '.join(diagnosis['buses_without_routes'])}")
            diagnosis["suggestions"].append("Route tracks to appropriate buses")

        if diagnosis["missing_critical_layers"]:
            missing_str = ", ".join([layer["role"] for layer in diagnosis["missing_critical_layers"]])
            diagnosis["mixing_warnings"].append(f"Missing critical layers: {missing_str}")
            diagnosis["suggestions"].append("Regenerate missing critical layers")

        # --- Export readiness ---------------------------------------------
        # Hard blockers flip `ready` to False; the remaining checks only
        # record issues without blocking export.
        ready = True
        if master_volume < QA_EXPORT_READINESS_CHECKS["master_volume_range"][0]:
            ready = False
            diagnosis["export_readiness"]["issues"].append({
                "issue": "master_volume_low",
                "message": f"Master volume too low: {master_volume:.2f}",
                "suggestion": f"Increase to {QA_EXPORT_READINESS_CHECKS['master_volume_range'][0]:.2f} or higher"
            })
        elif master_volume > QA_EXPORT_READINESS_CHECKS["master_volume_range"][1]:
            ready = False
            diagnosis["export_readiness"]["issues"].append({
                "issue": "master_volume_high",
                "message": f"Master volume too high: {master_volume:.2f}",
                "suggestion": f"Reduce to {QA_EXPORT_READINESS_CHECKS['master_volume_range'][1]:.2f} or lower to prevent clipping"
            })

        if diagnosis["bus_count"] < QA_EXPORT_READINESS_CHECKS["min_bus_count"]:
            ready = False
            diagnosis["export_readiness"]["issues"].append({
                "issue": "insufficient_buses",
                "message": f"Only {diagnosis['bus_count']} buses (need {QA_EXPORT_READINESS_CHECKS['min_bus_count']}+)",
                "suggestion": QA_ACTIONABLE_FIXES.get("insufficient_buses", {}).get("fix", "Create buses for drums, bass, music for proper mixing")
            })
            diagnosis["suggestions"].append("Create DRUMS, BASS, MUSIC buses and route tracks to them")

        if diagnosis["total_tracks"] < QA_EXPORT_READINESS_CHECKS["min_track_count"]:
            ready = False
            diagnosis["export_readiness"]["issues"].append({
                "issue": "insufficient_tracks",
                "message": f"Only {diagnosis['total_tracks']} tracks (need {QA_EXPORT_READINESS_CHECKS['min_track_count']}+)",
                "suggestion": "Run generate_track() with more layers or add MIDI/Audio tracks manually"
            })

        # Non-blocking readiness issues (deliberately do not change `ready`).
        if diagnosis["return_count"] < QA_EXPORT_READINESS_CHECKS.get("min_return_tracks", 2):
            diagnosis["export_readiness"]["issues"].append({
                "issue": "insufficient_returns",
                "message": f"Only {diagnosis['return_count']} return tracks (need {QA_EXPORT_READINESS_CHECKS.get('min_return_tracks', 2)}+)",
                "suggestion": QA_ACTIONABLE_FIXES.get("no_returns", {}).get("fix", "Create return tracks for reverb and delay")
            })

        if diagnosis["audio_track_count"] < QA_EXPORT_READINESS_CHECKS.get("min_audio_layers", 2):
            diagnosis["export_readiness"]["issues"].append({
                "issue": "insufficient_audio_layers",
                "message": f"Only {diagnosis['audio_track_count']} audio tracks (may need more audio layers)",
                "suggestion": "Run generate_track() again or add audio fallback layers"
            })

        # max(1, ...) guards the division when the set has zero tracks.
        empty_ratio = len(diagnosis["empty_arrangement_tracks"]) / max(1, diagnosis["total_tracks"])
        if empty_ratio > QA_EXPORT_READINESS_CHECKS.get("max_empty_tracks_ratio", 0.3):
            diagnosis["export_readiness"]["issues"].append({
                "issue": "high_empty_tracks_ratio",
                "message": f"{len(diagnosis['empty_arrangement_tracks'])} empty tracks ({empty_ratio*100:.0f}% of total)",
                "suggestion": "Remove unused tracks or commit Session to Arrangement"
            })

        clipping_count = sum(1 for t in tracks if isinstance(t, dict) and float(t.get("volume", 0)) > QA_PROBLEMATIC_VOLUME_THRESHOLD_HIGH)
        if clipping_count > QA_EXPORT_READINESS_CHECKS["max_clipping_tracks"]:
            diagnosis["export_readiness"]["issues"].append({
                "issue": "clipping_risk",
                "message": f"{clipping_count} tracks with volume > {QA_PROBLEMATIC_VOLUME_THRESHOLD_HIGH:.2f}",
                "suggestion": "Reduce track volumes to prevent clipping on export"
            })

        if diagnosis["missing_critical_layers"]:
            ready = False
            diagnosis["export_readiness"]["issues"].append({
                "issue": "missing_critical_layers",
                "message": f"Missing layers: {', '.join([layer['role'] for layer in diagnosis['missing_critical_layers']])}",
                "suggestion": "Regenerate track to include missing layers"
            })

        diagnosis["export_readiness"]["ready"] = ready

        if not ready:
            diagnosis["suggestions"].insert(0, "Fix export readiness issues before rendering")

        diagnosis["timestamp"] = time.time()
        diagnosis["diagnosis_version"] = "2.0"

        return json.dumps(diagnosis, indent=2)

    except Exception as e:
        logger.error(f"Error en diagnose_generated_set: {e}")
        diagnosis["error"] = str(e)
        return json.dumps(diagnosis, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_generation_manifest(ctx: Context) -> str:
    """
    Return the manifest of the last generation with real data.

    Includes:
    - genre, style, bpm, key, structure
    - reference used, or null
    - tracks blueprint
    - buses/returns created
    - audio layers with exact sample paths
    - resample layers
    - sections and variants used
    """
    manifest = _get_stored_manifest()

    # Happy path: serialize the stored manifest (default=str covers
    # non-JSON-native values such as Path or datetime objects).
    if manifest:
        return json.dumps(manifest, indent=2, default=str)

    # No manifest stored yet - report an error payload instead.
    error_payload = {
        "error": "No generation manifest found. Run generate_track() first.",
        "timestamp": time.time()
    }
    return json.dumps(error_payload, indent=2)
|
|
|
|
|
|
def _validate_muted_tracks(ableton: "AbletonConnection", tracks: List[Dict[str, Any]], issues: List[Dict[str, Any]]) -> None:
    """Validate unexpectedly muted tracks and detect tracks that should be active.

    Findings are appended to ``issues`` through ``_qa_log_issue``; the
    function returns nothing.

    Args:
        ableton: Live connection (not used by this validator; kept so all
            validators share the same signature).
        tracks: Track dicts as returned by the remote "get_tracks" command.
        issues: Shared issue list, mutated in place.
    """
    muted_with_content = []   # muted tracks that still hold arrangement clips
    muted_critical = []       # muted tracks matching a critical mix role
    unexpected_muted = []     # muted tracks with no better classification

    for track in tracks:
        if not isinstance(track, dict):
            continue
        track_name = str(track.get("name", "")).strip().upper()
        track_index = int(track.get("index", -1))
        normalized_name = _normalize_track_name(track_name)

        if track.get("mute", False):
            # Utility tracks and expected replacement-mutes are fine muted.
            if _is_utility_track_name(track_name):
                continue
            if _is_expected_replacement_mute(track_name):
                continue

            clip_count = _track_arrangement_clip_count(track)
            if clip_count > 0:
                muted_with_content.append({
                    "track_index": track_index,
                    "track_name": track.get("name", track_index),
                    "clips_count": clip_count,
                })

            # Classify by critical role (kick/bass/...) - first match wins.
            for role, role_names in QA_CRITICAL_TRACK_ROLES.items():
                if any(rn in normalized_name for rn in role_names):
                    muted_critical.append({
                        "track_index": track_index,
                        "track_name": track.get("name", track_index),
                        "role": role,
                    })
                    break

            # NOTE(review): this condition can never be true - `clip_count > 0`
            # implies the track was just appended to `muted_with_content`, so
            # `unexpected_muted` always stays empty. Confirm the intended
            # predicate (probably a per-track check, not the whole list).
            if not muted_with_content and clip_count > 0:
                unexpected_muted.append({
                    "track_index": track_index,
                    "track_name": track.get("name", track_index),
                    "suggestion": f"Unmute track '{track.get('name', track_index)}' or remove if unused",
                })

    # Muted-but-has-content: warning level.
    for item in muted_with_content:
        _qa_log_issue(issues, "warning", "muted_tracks",
            f"Track '{item['track_name']}' is muted but has {item['clips_count']} arrangement clips",
            {"track_index": item["track_index"], "track_name": item["track_name"], "clips_count": item["clips_count"],
             "suggestion": "Unmute if this track should be audible, or delete clips if track is unused"})

    # Muted critical role: error level - affects the mix foundation.
    for item in muted_critical:
        _qa_log_issue(issues, "error", "muted_critical",
            f"CRITICAL: Track '{item['track_name']}' ({item['role']}) is muted - this affects mix foundation",
            {"track_index": item["track_index"], "track_name": item["track_name"], "role": item["role"],
             "suggestion": f"Unmute {item['role']} track for proper mix balance"})

    # Info-level leftovers, capped at 5 entries.
    for item in unexpected_muted[:5]:
        _qa_log_issue(issues, "info", "unexpected_muted",
            f"Track '{item['track_name']}' is muted unexpectedly",
            {"track_index": item["track_index"], "suggestion": item["suggestion"]})
|
|
|
|
|
|
def _validate_empty_clips(ableton: "AbletonConnection", tracks: List[Dict[str, Any]], issues: List[Dict[str, Any]]) -> None:
    """Validate useful tracks without Arrangement content and detect empty critical roles.

    Skips buses and utility tracks; for each remaining unmuted track with at
    most ``QA_EMPTY_CLIP_DETECTION_THRESHOLD`` arrangement clips, logs a
    per-track issue and accumulates critical-role emptiness, which is then
    reported at error level.

    Args:
        ableton: Live connection, used only to list buses.
        tracks: Track dicts as returned by the remote "get_tracks" command.
        issues: Shared issue list, mutated in place via ``_qa_log_issue``.
    """
    # Bus names are excluded from the "empty track" check.
    bus_response = ableton.send_command("list_buses")
    bus_names = {
        _normalize_track_name(bus.get("name", ""))
        for bus in _extract_bus_payload(bus_response)
        if isinstance(bus, dict)
    }

    # role -> list of empty tracks matching that critical role.
    empty_critical_roles = {role: [] for role in QA_CRITICAL_TRACK_ROLES}

    for track in tracks:
        if not isinstance(track, dict):
            continue
        track_index = int(track.get("index", -1))
        track_name = str(track.get("name", f"Track {track_index}"))
        normalized_name = _normalize_track_name(track_name)

        if normalized_name in bus_names or _is_utility_track_name(normalized_name):
            continue

        arrangement_clips = _track_arrangement_clip_count(track)
        is_muted = track.get("mute", False)

        # Muted tracks are intentionally exempt: an empty muted track is
        # handled by the muted-tracks validator instead.
        if arrangement_clips <= QA_EMPTY_CLIP_DETECTION_THRESHOLD and not is_muted:
            # Record critical-role emptiness; first matching role wins.
            for role, role_names in QA_CRITICAL_TRACK_ROLES.items():
                if any(rn in normalized_name for rn in role_names):
                    empty_critical_roles[role].append({
                        "track_index": track_index,
                        "track_name": track_name,
                        "role": role,
                    })
                    break

            # "AUDIO ..." fallback tracks (but not resample tracks) get a
            # softer info-level message since regeneration may fill them.
            is_audio_fallback = normalized_name.startswith("AUDIO") and not normalized_name.startswith("AUDIO RESAMPLE")
            if not is_audio_fallback:
                _qa_log_issue(issues, "warning", "empty_clips",
                    f"Track '{track_name}' has no arrangement clips",
                    {"track_index": track_index, "arrangement_clip_count": arrangement_clips,
                     "suggestion": "Add content or mute track if unused"})
            else:
                _qa_log_issue(issues, "info", "empty_fallback_audio",
                    f"Audio fallback track '{track_name}' has no clips (may need regeneration)",
                    {"track_index": track_index, "suggestion": "Regenerate audio layers or check sample paths"})

    # Any critical role with only-empty tracks is an error (mix foundation).
    for role, track_list in empty_critical_roles.items():
        if track_list:
            tracks_str = ", ".join([t["track_name"] for t in track_list[:3]])
            _qa_log_issue(issues, "error", "empty_critical_role",
                f"CRITICAL ROLE EMPTY: {role.upper()} track(s) have no content: {tracks_str}",
                {"role": role, "tracks": track_list,
                 "suggestion": f"Generate content for {role} or add audio/MIDI clips to restore mix foundation"})
|
|
|
|
|
def _validate_returns(ableton: "AbletonConnection", issues: List[Dict[str, Any]]) -> None:
    """Validate useless return tracks and verify active sends.

    For every return track, checks whether it hosts devices and whether any
    regular track sends signal (> 0.01) to it, logging:
      - warning "useless_returns" when it has neither devices nor senders,
      - info "return_no_sends" when it has devices but no senders,
      - warning "no_returns" when the session has no return tracks at all.

    Args:
        ableton: Live connection used to query session/track/device info.
        issues: Shared issue list, mutated in place via ``_qa_log_issue``.

    Notes:
        Best-effort: failures on a single return are logged at debug level
        and skipped; a failure fetching session/track data aborts silently.
    """
    try:
        session_response = ableton.send_command("get_session_info")
        if _is_error_response(session_response):
            return

        num_returns = int(session_response.get("result", {}).get("num_return_tracks", 0))
        tracks_response = ableton.send_command("get_tracks")
        if _is_error_response(tracks_response):
            return
        tracks = _extract_tracks_payload(tracks_response)

        for return_index in range(num_returns):
            try:
                return_info_response = ableton.send_command("get_track_info", {
                    "track_type": "return",
                    "track_index": return_index,
                })
                if _is_error_response(return_info_response):
                    continue
                return_info = return_info_response.get("result", {})
                return_name = str(return_info.get("name", f"Return {return_index}")).strip().upper()

                devices_response = ableton.send_command("get_devices", {
                    "track_type": "return",
                    "track_index": return_index,
                })
                if _is_error_response(devices_response):
                    continue
                devices = _extract_devices_payload(devices_response)

                # Scan every regular track's send slot for this return.
                has_active_sends = False
                sends_to_this_return = []

                for track in tracks:
                    if not isinstance(track, dict):
                        continue
                    track_sends = track.get("sends", [])
                    if isinstance(track_sends, list):
                        for send_idx, send_val in enumerate(track_sends):
                            # Only the slot matching this return matters; check
                            # the index first so we never coerce unrelated values.
                            if send_idx != return_index:
                                continue
                            try:
                                if float(send_val) > 0.01:
                                    has_active_sends = True
                                    track_name = track.get("name", "?")
                                    sends_to_this_return.append(track_name)
                            except (TypeError, ValueError):
                                # Non-numeric send value - ignore this slot.
                                pass

                if not devices and not has_active_sends:
                    fix_info = QA_ACTIONABLE_FIXES.get("return_no_sends", {})
                    _qa_log_issue(issues, "warning", "useless_returns",
                        f"Return '{return_name}' has no devices and no sends from other tracks - not processing audio",
                        {
                            "return_index": return_index,
                            "return_name": return_name,
                            "suggestion": fix_info.get("fix", "Add devices or ensure other tracks send to this return"),
                        })

                elif not has_active_sends and devices:
                    _qa_log_issue(issues, "info", "return_no_sends",
                        f"Return '{return_name}' has devices but no sends from other tracks",
                        {
                            "return_index": return_index,
                            "return_name": return_name,
                            "suggestion": "Set send levels on tracks to route audio to this return",
                        })

            except Exception as return_exc:
                # Keep best-effort semantics per return, but leave a trace
                # instead of silently swallowing the failure.
                logger.debug(f"Error validando return {return_index}: {return_exc}")

        if num_returns == 0:
            fix_info = QA_ACTIONABLE_FIXES.get("no_returns", {})
            _qa_log_issue(issues, "warning", "no_returns",
                "No return tracks found - mix will lack spatial effects",
                {"suggestion": fix_info.get("fix", "Create return tracks for reverb and delay effects")})

    except Exception as e:
        logger.debug(f"Error validando returns: {e}")
|
|
|
|
|
|
def _validate_empty_midi_clips(ableton: "AbletonConnection", tracks: List[Dict[str, Any]], issues: List[Dict[str, Any]]) -> None:
    """Validate MIDI clips that exist but contain no notes.

    A clip counts as empty when ``has_notes`` is explicitly False, or when
    ``has_notes`` is absent and ``notes_count`` is 0. Only MIDI tracks whose
    clips are ALL empty are reported.

    Args:
        ableton: Live connection (not used by this validator; kept so all
            validators share the same signature).
        tracks: Track dicts as returned by the remote "get_tracks" command.
        issues: Shared issue list, mutated in place via ``_qa_log_issue``.
    """
    empty_midi_clips = []        # per-clip detail entries (max 3 per track)
    tracks_with_empty_midi = []  # tracks where every clip is empty

    for track in tracks:
        if not isinstance(track, dict):
            continue
        track_index = int(track.get("index", -1))
        track_name = str(track.get("name", f"Track {track_index}"))
        track_type = str(track.get("type", "")).lower()

        # Only MIDI tracks are relevant; utility tracks are exempt.
        if track_type != "midi":
            continue
        if _is_utility_track_name(track_name):
            continue

        clips = track.get("clips", [])
        if not isinstance(clips, list):
            clips = []

        has_non_empty_clip = False
        empty_clips_in_track = []

        for clip_idx, clip in enumerate(clips):
            if not isinstance(clip, dict):
                continue

            clip_name = clip.get("name", f"Clip {clip_idx}")
            is_playing = clip.get("is_playing", False)
            has_notes = clip.get("has_notes", None)
            notes_count = clip.get("notes_count", 0)

            # Explicit has_notes wins; otherwise fall back to notes_count.
            if has_notes is False or (has_notes is None and notes_count == 0):
                empty_clips_in_track.append({
                    "clip_index": clip_idx,
                    "clip_name": clip_name,
                    "is_playing": is_playing,
                })
            elif has_notes is True or notes_count > 0:
                has_non_empty_clip = True

        # Only report tracks where NO clip has notes at all.
        if empty_clips_in_track and not has_non_empty_clip:
            tracks_with_empty_midi.append({
                "track_index": track_index,
                "track_name": track_name,
                "empty_clips_count": len(empty_clips_in_track),
            })

            # Keep at most 3 per-clip entries per track to bound output.
            for empty_clip in empty_clips_in_track[:3]:
                empty_midi_clips.append({
                    "track_index": track_index,
                    "track_name": track_name,
                    "clip_index": empty_clip["clip_index"],
                    "clip_name": empty_clip["clip_name"],
                    "is_playing": empty_clip["is_playing"],
                })

    # Aggregate warning only when the count exceeds the configured cap.
    if len(tracks_with_empty_midi) > QA_MAX_EMPTY_MIDI_CLIPS_WARNING:
        fix_info = QA_ACTIONABLE_FIXES.get("empty_midi_clip", {})
        _qa_log_issue(issues, "warning", "empty_midi_tracks",
            f"{len(tracks_with_empty_midi)} MIDI tracks have only empty clips - no musical content",
            {
                "tracks": tracks_with_empty_midi[:5],
                "suggestion": fix_info.get("fix", "Add notes to MIDI clips or remove empty tracks"),
            })

    # Per-clip info entries, capped by the same limit.
    for clip_info in empty_midi_clips[:QA_MAX_EMPTY_MIDI_CLIPS_WARNING]:
        fix_info = QA_ACTIONABLE_FIXES.get("empty_midi_clip", {})
        _qa_log_issue(issues, "info", "empty_midi_clip",
            f"MIDI clip '{clip_info['clip_name']}' on track '{clip_info['track_name']}' has no notes",
            {
                "track_index": clip_info["track_index"],
                "clip_index": clip_info["clip_index"],
                "suggestion": fix_info.get("fix", "Open piano roll and add notes"),
            })
|
|
|
|
|
|
def _validate_routing(ableton: "AbletonConnection", tracks: List[Dict[str, Any]], issues: List[Dict[str, Any]]) -> None:
    """Validate broken routing and detect tracks not routed to expected buses.

    Builds the set of known output destinations (all track names plus bus
    names), then flags:
      - error "broken_routing" when a track's output points at a name that
        does not exist in the session,
      - warning "missing_bus_routing" when an "AUDIO ..." track with a known
        expected bus is routed to main/none instead.

    Args:
        ableton: Live connection used to list buses and query routing.
        tracks: Track dicts as returned by the remote "get_tracks" command.
        issues: Shared issue list, mutated in place via ``_qa_log_issue``.
    """
    # Any existing track name is a valid routing destination.
    known_destinations = {
        _normalize_track_name(track.get("name", ""))
        for track in tracks
        if isinstance(track, dict)
    }
    # NOTE(review): bus_name_by_key is populated but never read below -
    # confirm whether it was meant to drive the expected-bus resolution.
    bus_name_by_key = {}
    bus_response = ableton.send_command("list_buses")
    for bus in _extract_bus_payload(bus_response):
        if isinstance(bus, dict):
            bus_key = str(bus.get("bus_key", "") or bus.get("key", "")).strip().lower()
            bus_name = _normalize_track_name(bus.get("name", ""))
            if bus_key and bus_name:
                bus_name_by_key[bus_key] = bus_name
                known_destinations.add(bus_name)

    tracks_with_broken_routing = []
    tracks_missing_bus_routing = []

    for track in tracks:
        if not isinstance(track, dict):
            continue
        track_index = int(track.get("index", -1))
        track_name = str(track.get("name", f"Track {track_index}"))
        normalized_name = _normalize_track_name(track_name)

        if _is_utility_track_name(normalized_name):
            continue

        # Resolve the bus this track is expected to feed, by role keyword.
        # NOTE(review): BUS_ROUTING_MAP carries a noqa F821 - it may not be
        # defined in this module; confirm where it lives.
        expected_bus = None
        for role_key, allowed_buses in BUS_ROUTING_MAP.items():  # noqa: F821
            if role_key in normalized_name.lower():
                expected_bus = allowed_buses
                break

        # AUDIO tracks get a more specific expectation from their template.
        if normalized_name.startswith("AUDIO "):
            template_name = _match_audio_track_template(normalized_name, AUDIO_TRACK_BUS_KEYS)
            if template_name:
                expected_bus = {AUDIO_TRACK_BUS_KEYS.get(template_name, "")}

        try:
            # Prefer routing info already present on the track payload;
            # otherwise ask Live for it.
            current_output = _normalize_track_name(track.get("current_output_routing", ""))
            if not current_output:
                routing_response = ableton.send_command("get_track_routing", {"track_index": track_index})
                if _is_error_response(routing_response):
                    continue
                routing = routing_response.get("result", {})
                current_output = _normalize_track_name(routing.get("current_output_routing", ""))

            # Routed to main (or nowhere): only a problem for AUDIO tracks
            # that have a specific expected bus.
            if not current_output or current_output in QA_VALID_MAIN_ROUTING_NAMES or "NO OUTPUT" in current_output:
                if expected_bus and normalized_name.startswith("AUDIO "):
                    tracks_missing_bus_routing.append({
                        "track_index": track_index,
                        "track_name": track_name,
                        "expected_bus": list(expected_bus)[0] if len(expected_bus) == 1 else list(expected_bus),
                        "current_routing": current_output or "Master",
                    })
                continue

            # Output points at a name that does not exist in the session.
            if current_output not in known_destinations:
                tracks_with_broken_routing.append({
                    "track_index": track_index,
                    "track_name": track_name,
                    "routing_target": current_output,
                })
                _qa_log_issue(issues, "error", "broken_routing",
                    f"Track '{track_name}' routes to '{current_output}' which does not exist",
                    {"track_index": track_index, "routing_target": current_output,
                     "suggestion": f"Create bus '{current_output}' or route track to existing bus"})

        except Exception as e:
            _qa_log_issue(issues, "warning", "routing_check_error",
                f"Could not check routing for track '{track_name}': {e}",
                {"track_index": track_index})

    # Report at most 5 missing-bus-routing findings.
    for item in tracks_missing_bus_routing[:5]:
        expected = item["expected_bus"]
        if isinstance(expected, list):
            expected_str = " or ".join(expected)
        else:
            expected_str = expected
        _qa_log_issue(issues, "warning", "missing_bus_routing",
            f"Track '{item['track_name']}' routes to {item['current_routing']} but should route to {expected_str}",
            {"track_index": item["track_index"], "expected_bus": item["expected_bus"],
             "current_routing": item["current_routing"],
             "suggestion": f"Route track to '{expected_str}' bus for proper mixing"})
|
|
|
|
|
|
def _validate_gain_staging(ableton: "AbletonConnection", tracks: List[Dict[str, Any]], issues: List[Dict[str, Any]]) -> None:
    """Validate problematic gain staging with per-track-type thresholds.

    Checks each non-bus, non-utility track against volume thresholds keyed by
    track-name fragments and logs:
      - error "gain_staging" for volumes above the max (clipping risk),
      - warning for unmuted tracks below the min,
      - info for extreme pan positions (|pan| > 0.9).

    Args:
        ableton: Live connection (not used by this validator; kept so all
            validators share the same signature).
        tracks: Track dicts as returned by the remote "get_tracks" command.
        issues: Shared issue list, mutated in place via ``_qa_log_issue``.
    """
    clipping_tracks = []
    quiet_tracks = []
    pan_extreme_tracks = []

    VOLUME_THRESHOLDS_BY_TRACK = {
        "KICK": {"max": 0.95, "min": 0.70},
        "BASS": {"max": 0.92, "min": 0.65},
        "CLAP": {"max": 0.88, "min": 0.55},
        "SNARE": {"max": 0.88, "min": 0.55},
        "HAT": {"max": 0.78, "min": 0.45},
        "AUDIO KICK": {"max": 0.95, "min": 0.80},
        "AUDIO CLAP": {"max": 0.85, "min": 0.65},
        "AUDIO HAT": {"max": 0.75, "min": 0.50},
        "AUDIO BASS": {"max": 0.90, "min": 0.70},
        "AUDIO BASS LOOP": {"max": 0.90, "min": 0.70},
        "AUDIO SYNTH": {"max": 0.82, "min": 0.45},
        "AUDIO VOCAL": {"max": 0.85, "min": 0.50},
        "AUDIO ATMOS": {"max": 0.70, "min": 0.35},
        "AUDIO RESAMPLE": {"max": 0.75, "min": 0.45},
    }

    for track in tracks:
        if not isinstance(track, dict):
            continue
        track_index = int(track.get("index", -1))
        track_name = str(track.get("name", f"Track {track_index}"))
        normalized_name = _normalize_track_name(track_name)
        if _is_utility_track_name(track_name):
            continue
        # Bus tracks have their own level conventions - skip them.
        if normalized_name.startswith("DRUMS") or normalized_name.startswith("BASS") or normalized_name.startswith("MUSIC") or normalized_name.startswith("VOCAL") or normalized_name.startswith("FX"):
            continue

        volume = float(track.get("volume", 0.85))

        # Pick the MOST SPECIFIC (longest) matching key. Previously the first
        # substring match won, so generic keys like "KICK" shadowed the
        # "AUDIO KICK"-style entries, making those thresholds unreachable.
        thresholds = None
        matched_key = None
        for key, thresh in VOLUME_THRESHOLDS_BY_TRACK.items():
            if key in normalized_name and (matched_key is None or len(key) > len(matched_key)):
                matched_key = key
                thresholds = thresh

        if thresholds is None:
            max_vol = QA_PROBLEMATIC_VOLUME_THRESHOLD_HIGH
            min_vol = QA_PROBLEMATIC_VOLUME_THRESHOLD_LOW
        else:
            max_vol = thresholds.get("max", QA_PROBLEMATIC_VOLUME_THRESHOLD_HIGH)
            min_vol = thresholds.get("min", QA_PROBLEMATIC_VOLUME_THRESHOLD_LOW)

        if volume > max_vol:
            clipping_tracks.append({
                "track_index": track_index,
                "track_name": track_name,
                "volume": volume,
                "threshold": max_vol,
            })

        # Muted tracks are allowed to be quiet.
        if volume < min_vol and not track.get("mute", False):
            quiet_tracks.append({
                "track_index": track_index,
                "track_name": track_name,
                "volume": volume,
                "threshold": min_vol,
            })

        pan = float(track.get("pan", 0.0))
        if abs(pan) > 0.9:
            pan_extreme_tracks.append({
                "track_index": track_index,
                "track_name": track_name,
                "pan": pan,
            })

    # Report caps keep the issue list bounded: 5 / 5 / 3 entries.
    for item in clipping_tracks[:5]:
        _qa_log_issue(issues, "error", "gain_staging",
            f"Track '{item['track_name']}' volume too high: {item['volume']:.2f} (max {item['threshold']:.2f}) - CLIPPING RISK",
            {"track_index": item["track_index"], "volume": item["volume"], "threshold": item["threshold"],
             "suggestion": f"Reduce volume to {item['threshold']:.2f} or lower to prevent clipping"})

    for item in quiet_tracks[:5]:
        _qa_log_issue(issues, "warning", "gain_staging",
            f"Track '{item['track_name']}' volume too low: {item['volume']:.2f} (min {item['threshold']:.2f})",
            {"track_index": item["track_index"], "volume": item["volume"], "threshold": item["threshold"],
             "suggestion": f"Increase volume to at least {item['threshold']:.2f} for proper mix level"})

    for item in pan_extreme_tracks[:3]:
        _qa_log_issue(issues, "info", "gain_staging",
            f"Track '{item['track_name']}' has extreme pan: {item['pan']:+.2f}",
            {"track_index": item["track_index"], "pan": item["pan"],
             "suggestion": "Extreme panning may cause mix balance issues in mono playback"})
|
|
|
|
|
|
def _generate_qa_report(issues: List[Dict[str, Any]], validation_type: str) -> Dict[str, Any]:
|
|
"""Genera un reporte QA estructurado."""
|
|
# Contar por severidad
|
|
by_severity = {"error": 0, "warning": 0, "info": 0}
|
|
by_category: Dict[str, int] = {}
|
|
|
|
for issue in issues:
|
|
severity = str(issue.get("severity", "info")).lower()
|
|
category = str(issue.get("category", "unknown"))
|
|
|
|
if severity in by_severity:
|
|
by_severity[severity] += 1
|
|
by_category[category] = by_category.get(category, 0) + 1
|
|
|
|
# Determinar estado general
|
|
if by_severity["error"] > 0:
|
|
status = "FAILED"
|
|
elif by_severity["warning"] > 0:
|
|
status = "WARNING"
|
|
else:
|
|
status = "PASSED"
|
|
|
|
return {
|
|
"validation_type": validation_type,
|
|
"status": status,
|
|
"total_issues": len(issues),
|
|
"by_severity": by_severity,
|
|
"by_category": by_category,
|
|
"issues": issues,
|
|
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
|
|
}
|
|
|
|
|
|
|
|
@mcp.tool()
def get_sample_coverage_report(ctx: Context) -> str:
    """T015: Report coverage of samples used from the library.

    Returns:
        JSON with: coverage percentage per subfolder, most-used samples,
        and the count of samples never used.
    """
    try:
        global _sample_usage_history, _coverage_wheel

        total_samples = len(_sample_usage_history)

        # Most-used samples, aggregated across every role they appeared in.
        top_used = []
        for path, roles in _sample_usage_history.items():
            total_uses = sum(r.get("uses", 0) for r in roles.values())
            last_used = max((r.get("last_used", 0) for r in roles.values()), default=0)
            top_used.append({
                "path": path,
                "name": Path(path).name,
                "total_uses": total_uses,
                "roles": list(roles.keys()),
                "last_used": time.strftime("%Y-%m-%d %H:%M", time.localtime(last_used)) if last_used else None
            })
        top_used.sort(key=lambda x: x["total_uses"], reverse=True)

        # Samples never used (requires scanning the library).
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        try:
            sample_manager = get_sample_manager()
            all_samples = list(sample_manager.samples.keys()) if sample_manager else []
            unused_samples = [s for s in all_samples if s not in _sample_usage_history]
        except Exception:
            unused_samples = []

        # Per-folder coverage (Coverage Wheel).
        folder_stats = []
        for folder, data in _coverage_wheel.items():
            folder_samples = data.get("samples", [])
            folder_stats.append({
                "folder": folder,
                "uses": data.get("uses", 0),
                "samples_count": len(folder_samples),
                "last_used": time.strftime("%Y-%m-%d %H:%M", time.localtime(data.get("last_used", 0))) if data.get("last_used") else None
            })
        folder_stats.sort(key=lambda x: x["uses"], reverse=True)

        # Coverage percentage over the whole library (guard against /0).
        total_library = len(unused_samples) + total_samples if (len(unused_samples) + total_samples) > 0 else 1
        coverage_percent = (total_samples / total_library) * 100

        report = {
            "summary": {
                "total_samples_used": total_samples,
                "total_samples_unused": len(unused_samples),
                "coverage_percent": round(coverage_percent, 1),
                "folders_tracked": len(_coverage_wheel)
            },
            "top_used_samples": top_used[:20],  # Top 20
            "unused_samples_count": len(unused_samples),
            "folder_coverage": folder_stats[:15],  # Top 15 folders
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        return json.dumps(report, indent=2)

    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def reset_sample_fatigue(ctx: Context, role: Optional[str] = None) -> str:
    """
    T023: Reset sample fatigue.

    Fatigue prevents the same sample from being reused repeatedly in the
    same role; this tool "frees" samples so they can be selected again.

    Args:
        role: If given, only reset fatigue for that role (e.g. "kick", "bass").
              If None, reset ALL fatigue in the system.

    Returns:
        JSON with the reset outcome.
    """
    try:
        outcome = _reset_sample_fatigue(role)
        # The helper reports either per-sample or per-entry clear counts.
        cleared = outcome.get("samples_cleared") or outcome.get("entries_cleared", 0)
        payload = {
            "status": "success",
            "action": "reset_sample_fatigue",
            "reset": outcome.get("reset", "unknown"),
            "cleared": cleared,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_sample_fatigue_report(ctx: Context) -> str:
    """
    T024: Return the sample-fatigue report.

    Shows which samples have been used the most and are currently being
    penalised during selection.

    Returns:
        JSON with the top-10 most used samples per role and overall.
    """
    try:
        base_report = _get_sample_fatigue_report()

        # Enrich each top sample with its current per-role fatigue factors.
        enriched = []
        for entry in base_report.get("most_used_overall", [])[:10]:
            sample_path = entry["path"]
            fatigue_entry = _sample_fatigue.get(sample_path, {})
            per_role = [
                {
                    "role": role_name,
                    "uses": role_data.get("uses", 0),
                    "fatigue_factor": _get_fatigue_factor(sample_path, role_name),
                }
                for role_name, role_data in fatigue_entry.items()
            ]
            stamp = entry.get("last_used", 0)
            enriched.append({
                "path": sample_path,
                "name": Path(sample_path).name,
                "total_uses": entry["total_uses"],
                "roles": per_role,
                "last_used": time.strftime("%Y-%m-%d %H:%M", time.localtime(stamp)) if stamp else None,
            })

        full_report = {
            "summary": {
                "total_samples_with_fatigue": base_report["total_samples"],
                "thresholds": {
                    "fresh": "0 usos → factor 1.0",
                    "light": "1-3 usos → factor 0.75",
                    "moderate": "4-10 usos → factor 0.50",
                    "heavy": "10+ usos → factor 0.20"
                }
            },
            "most_used_samples": enriched,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        return json.dumps(full_report, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def set_palette_lock(ctx: Context, drums: Optional[str] = None, bass: Optional[str] = None, music: Optional[str] = None) -> str:
    """
    T028: Force a specific palette for the next generation.

    Args:
        drums: Anchor folder path for drums (e.g. "librerias/all_tracks/Kick Loops")
        bass: Anchor folder path for bass (e.g. "librerias/all_tracks/Bass Loops")
        music: Anchor folder path for music (e.g. "librerias/all_tracks/Synth Loops")

    Returns:
        JSON confirming the palette lock that was set.
    """
    try:
        global _palette_lock_override

        # Rebuild the override from scratch; omitted roles stay unlocked.
        _palette_lock_override = {}
        for role_name, folder in (("drums", drums), ("bass", bass), ("music", music)):
            if folder:
                _palette_lock_override[role_name] = folder

        logger.info(f"🔒 Palette lock establecido: {_palette_lock_override}")

        return json.dumps({
            "status": "success",
            "action": "set_palette_lock",
            "palette": _palette_lock_override,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_coverage_wheel_report(ctx: Context) -> str:
    """
    T032: Return a per-folder usage heatmap (Coverage Wheel).

    Shows which library folders are used the most/least, to guide
    diverse sample selection.

    Returns:
        JSON with the folder heatmap ranked by usage.
    """
    try:
        global _coverage_wheel

        grand_total = sum(entry.get("uses", 0) for entry in _coverage_wheel.values())
        ranked = sorted(_coverage_wheel.items(), key=lambda kv: kv[1].get("uses", 0), reverse=True)

        folder_stats = []
        for folder, entry in ranked:
            uses = entry.get("uses", 0)
            last_used = entry.get("last_used", 0)

            # Share of the total usage drives the heat label.
            usage_percent = (uses / grand_total) * 100 if grand_total > 0 else 0

            if usage_percent > 20:
                heat = "HOT 🔥"
            elif usage_percent > 10:
                heat = "WARM 🌡️"
            elif usage_percent > 5:
                heat = "COOL ❄️"
            else:
                heat = "FROZEN 🧊"

            folder_stats.append({
                "folder": folder,
                "folder_name": Path(folder).name,
                "uses": uses,
                "samples_count": len(entry.get("samples", [])),
                "usage_percent": round(usage_percent, 2),
                "heat_level": heat,
                "last_used": time.strftime("%Y-%m-%d %H:%M", time.localtime(last_used)) if last_used else None
            })

        report = {
            "summary": {
                "total_folders": len(_coverage_wheel),
                "total_uses": grand_total,
                "hot_folders": sum(1 for f in folder_stats if "HOT" in f["heat_level"]),
                "frozen_folders": sum(1 for f in folder_stats if "FROZEN" in f["heat_level"])
            },
            "heatmap": folder_stats[:30],  # Top 30
            "cold_start_candidates": [f["folder"] for f in folder_stats[-10:] if f["uses"] == 0],
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        return json.dumps(report, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
|
|
@mcp.tool()
def generate_with_human_feel(ctx: Context, genre: str, bpm: float = 0, key: str = "",
                             humanize: bool = True, groove_style: str = "shuffle",
                             structure: str = "standard") -> str:
    """
    T040-T050: Generate a track config with human feel applied.

    Args:
        genre: Musical genre
        bpm: BPM (0 = auto)
        key: Key
        humanize: Apply timing/velocity humanisation
        groove_style: Groove style (straight, shuffle, triplet, latin)
        structure: Song structure

    Returns:
        JSON with the generated config and the selected palette.
    """
    try:
        logger.info(f"Generando {genre} con human feel (groove={groove_style})")

        generator = get_song_generator()

        # Select palette anchors first so the config is built around them.
        palette = _select_anchor_folders(genre, key, bpm)

        config = generator.generate_config(genre, style="", bpm=bpm, key=key,
                                           structure=structure, palette=palette)

        # FIX: a HumanFeelEngine was previously instantiated here but never
        # used; the dead construction has been removed.

        return json.dumps({
            "status": "success",
            "action": "generate_with_human_feel",
            "config": config,
            "palette": palette,
            "humanize": humanize,
            "groove_style": groove_style,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
# ============================================================================
# FASE 3: HUMAN FEEL & DYNAMICS TOOLS (T040-T050)
# ============================================================================
|
@mcp.tool()
def apply_clip_fades(ctx: Context, track_index: int, clip_index: int,
                     fade_in_bars: float = 0.0, fade_out_bars: float = 0.0) -> str:
    """
    T041: Apply fade in/out to a clip via a volume envelope.

    Args:
        track_index: Track index
        clip_index: Clip index
        fade_in_bars: Fade-in length (beats/bars)
        fade_out_bars: Fade-out length (beats/bars)

    Example: intro fade-in 4-8 bars, symmetric outro fade-out, break fade-down/up.
    """
    try:
        conn = get_ableton_connection()

        # 1. Fetch clip info to learn its length.
        clip_info = conn.send_command("get_clip_info", {
            "track_index": track_index,
            "clip_index": clip_index
        })

        if not isinstance(clip_info, dict) or clip_info.get("status") != "ok":
            return json.dumps({"error": "Could not get clip info"}, indent=2)

        clip_length = clip_info.get("length", 4.0)

        # 2. Build the volume automation breakpoints.
        points = []
        if fade_in_bars > 0:
            # Ramp 0.0 -> 1.0 over the fade-in window.
            points.append({"time": 0.0, "value": 0.0})
            points.append({"time": fade_in_bars, "value": 1.0})
        else:
            points.append({"time": 0.0, "value": 1.0})

        if fade_out_bars > 0:
            # Ramp 1.0 -> 0.0, ending exactly at the clip boundary.
            fade_start = max(0, clip_length - fade_out_bars)
            points.append({"time": fade_start, "value": 1.0})
            points.append({"time": clip_length, "value": 0.0})

        # 3. Write the envelope into the clip.
        result = conn.send_command("write_clip_envelope", {
            "track_index": track_index,
            "clip_index": clip_index,
            "parameter": "volume",
            "points": points
        })

        return json.dumps({
            "status": "success",
            "action": "apply_clip_fades",
            "track_index": track_index,
            "clip_index": clip_index,
            "fade_in_bars": fade_in_bars,
            "fade_out_bars": fade_out_bars,
            "clip_length": clip_length,
            "envelope_points": len(points),
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def write_volume_automation(ctx: Context, track_index: int,
                            curve_type: str = "linear",
                            start_value: float = 0.85,
                            end_value: float = 0.85,
                            duration_bars: float = 8.0) -> str:
    """
    T042: Write volume automation following a curve shape.

    Args:
        track_index: Track index
        curve_type: Curve shape ('linear', 'exponential', 's_curve', 'punch')
        start_value: Start volume (0.0-1.0, where 0.85 = 0dB)
        end_value: End volume (0.0-1.0)
        duration_bars: Automation length in bars

    Examples:
        - Build: exponential 0.5 -> 0.85 over 8 bars
        - Drop punch: punch curve 0.85 -> 1.0 -> 0.85
    """
    try:
        conn = get_ableton_connection()

        # Generate breakpoints along the requested curve.
        points = []
        num_points = 20  # curve resolution

        for i in range(num_points + 1):
            t = i / num_points
            # FIX: this local was named `time`, shadowing the stdlib `time`
            # module imported at file level; renamed to `position`.
            position = t * duration_bars

            if curve_type == "linear":
                value = start_value + (end_value - start_value) * t
            elif curve_type == "exponential":
                # Exponential shape for builds: slow start when rising,
                # fast start when falling.
                if start_value < end_value:
                    value = start_value + (end_value - start_value) * (t ** 2)
                else:
                    value = start_value - (start_value - end_value) * (t ** 0.5)
            elif curve_type == "s_curve":
                # Smoothstep S-curve.
                value = start_value + (end_value - start_value) * (3*t**2 - 2*t**3)
            elif curve_type == "punch":
                # Punch: quick rise to peak, settle back, then hold.
                if t < 0.3:
                    value = start_value + (1.0 - start_value) * (t / 0.3)
                elif t < 0.7:
                    peak = 1.0
                    value = peak - (peak - end_value) * ((t - 0.3) / 0.4)
                else:
                    value = end_value
            else:
                # Unknown curve type falls back to linear.
                value = start_value + (end_value - start_value) * t

            points.append({"time": position, "value": max(0.0, min(1.0, value))})

        result = conn.send_command("write_track_automation", {
            "track_index": track_index,
            "parameter": "volume",
            "points": points
        })

        return json.dumps({
            "status": "success",
            "action": "write_volume_automation",
            "track_index": track_index,
            "curve_type": curve_type,
            "start_value": start_value,
            "end_value": end_value,
            "duration_bars": duration_bars,
            "points_count": len(points),
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_sidechain_pump(ctx: Context, target_track: int,
                         intensity: str = "subtle",
                         style: str = "jackin") -> str:
    """
    T045: Apply sidechain pumping to a track.

    Args:
        target_track: Target track index
        intensity: 'subtle', 'moderate', 'heavy'
        style: 'jackin' (every beat), 'breathing' (every 2 beats), 'subtle' (minimal)

    Sets up a sidechain compressor on the track, keyed from the kick.
    """
    try:
        conn = get_ableton_connection()

        # Compressor presets per intensity level.
        configs = {
            "subtle": {"threshold": -20.0, "ratio": 2.0, "attack": 5.0, "release": 100.0},
            "moderate": {"threshold": -15.0, "ratio": 4.0, "attack": 3.0, "release": 80.0},
            "heavy": {"threshold": -10.0, "ratio": 8.0, "attack": 1.0, "release": 60.0}
        }
        chosen = configs.get(intensity, configs["subtle"])

        result = conn.send_command("setup_sidechain", {
            "target_track": target_track,
            "source_track": 0,  # assumes track 0 is the kick
            "compressor_params": chosen,
            "style": style
        })

        return json.dumps({
            "status": "success",
            "action": "apply_sidechain_pump",
            "target_track": target_track,
            "intensity": intensity,
            "style": style,
            "compressor_config": chosen,
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def inject_pattern_fills(ctx: Context, track_index: int,
                         fill_density: str = "medium",
                         section: str = "drop") -> str:
    """
    T048: Inject pattern fills (snare rolls, flams, tom fills, hi-hat bursts).

    Args:
        track_index: Drum track index
        fill_density: 'sparse' (1 every 8 bars), 'medium', 'heavy' (every 2 bars)
        section: Section to apply in (intro, build, drop, break, outro)

    Adds rhythmic variation with fills at strategic points.
    """
    try:
        conn = get_ableton_connection()

        # Interval/length presets per density.
        density_config = {
            "sparse": {"interval_bars": 8, "fill_length": 1},
            "medium": {"interval_bars": 4, "fill_length": 2},
            "heavy": {"interval_bars": 2, "fill_length": 4}
        }
        preset = density_config.get(fill_density, density_config["medium"])

        result = conn.send_command("inject_fills", {
            "track_index": track_index,
            "fill_type": "auto",  # snare_roll, flam, tom_fill, hihat_burst
            "interval_bars": preset["interval_bars"],
            "fill_length_bars": preset["fill_length"],
            "section": section
        })

        return json.dumps({
            "status": "success",
            "action": "inject_pattern_fills",
            "track_index": track_index,
            "fill_density": fill_density,
            "section": section,
            "config": preset,
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def humanize_set(ctx: Context, intensity: float = 0.5) -> str:
    """
    T050: Umbrella tool to humanise the whole set.

    Args:
        intensity: Humanisation level (0.3 = subtle, 0.6 = medium, 1.0 = extreme)

    Applies timing variation, velocity humanisation and groove settings to
    every MIDI clip in the set.
    """
    try:
        conn = get_ableton_connection()

        # FIX: removed a redundant local `from human_feel import
        # HumanFeelEngine` (already imported at file top) and an engine
        # instance that was constructed but never used.

        tracks_response = conn.send_command("get_all_tracks")
        if not isinstance(tracks_response, dict):
            return json.dumps({"error": "Could not get tracks"}, indent=2)

        tracks = tracks_response.get("tracks", [])
        results = []

        for track in tracks:
            track_idx = track.get("index")
            # Only MIDI clips can be humanised; skip audio tracks.
            if not track.get("is_midi", False):
                continue

            for clip in track.get("clips", []):
                clip_idx = clip.get("index", 0)

                if intensity >= 0.6:
                    # Timing + velocity + groove at higher intensities.
                    settings = {
                        "timing_variation_ms": intensity * 10,
                        "velocity_variance": intensity * 0.1,
                        "groove_style": "shuffle" if intensity > 0.7 else "straight"
                    }
                else:
                    # Velocity only at low intensities.
                    settings = {
                        "velocity_variance": intensity * 0.05
                    }

                results.append({
                    "track": track_idx,
                    "clip": clip_idx,
                    "settings": settings
                })

        return json.dumps({
            "status": "success",
            "action": "humanize_set",
            "intensity": intensity,
            # FIX: previously reported the clip count under "tracks_affected";
            # now counts distinct tracks that received settings.
            "tracks_affected": len({r["track"] for r in results}),
            "clips_processed": len(results),
            "details": results
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def reset_diversity_memory(ctx: Context) -> str:
    """
    Clear the cross-generation diversity memory.

    Lets the system reuse sample families that had been penalised by
    previous usage. Useful when you want a full "refresh" of selections.
    """
    try:
        # In-RAM memory held by sample_selector.
        if reset_cross_generation_memory is not None:
            reset_cross_generation_memory()

        # Persistent memory held by the optional diversity_memory module.
        try:
            from diversity_memory import reset_diversity_memory as _reset_diversity_persistent
        except ImportError:
            logger.warning("diversity_memory no disponible, solo se reseteó memoria en RAM")
        else:
            _reset_diversity_persistent()
            logger.info("Memoria de diversidad persistente reseteada")

        return json.dumps({
            "status": "success",
            "message": "Memoria de diversidad reseteada completamente",
            "action": "reset_diversity_memory",
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": str(e),
            "action": "reset_diversity_memory"
        }, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_diversity_memory_stats(ctx: Context) -> str:
    """
    Fetch statistics about the diversity memory.

    Returns:
        JSON with:
        - used_families: sample families used, with counts
        - total_families: total number of families
        - generation_count: generation counter
        - file_location: location of the persistent file
        - critical_roles: critical roles that use the memory
        - penalty_formula: penalty formula applied
    """
    try:
        try:
            # Preferred: persistent stats from the diversity_memory module.
            from diversity_memory import get_diversity_memory_stats as _get_diversity_stats
            stats = _get_diversity_stats()
            logger.info("Stats de memoria obtenidas desde diversity_memory")
        except ImportError:
            # Fallback: build an equivalent report from in-RAM state.
            logger.warning("diversity_memory no disponible, usando memoria en RAM")
            from sample_selector import get_cross_generation_state
            families, paths = get_cross_generation_state()
            stats = {
                "used_families": families,
                "total_families": len(families),
                "used_paths": paths,
                "total_paths": len(paths),
                "generation_count": "N/A (diversity_memory no disponible)",
                "file_location": None,
                "critical_roles": ["kick", "clap", "hat", "bass_loop", "vocal_loop", "top_loop"],
                "penalty_formula": {"0 usos": 1.0, "1 uso": 0.7, "2 usos": 0.5, "3+ usos": 0.3},
                "source": "RAM (diversity_memory no disponible)"
            }

        return json.dumps(stats, indent=2, default=str)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": str(e),
            "action": "get_diversity_memory_stats"
        }, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# FASE 2.C/D/E: FINGERPRINT & WILD CARD TOOLS (T033-T039)
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def find_duplicate_samples(ctx: Context) -> str:
    """
    T033-T039: Find duplicated samples in the library.

    Uses fingerprinting to detect identical files.

    Returns:
        JSON with groups of duplicated files.
    """
    try:
        if get_fingerprint_db is None:
            return json.dumps({"error": "audio_fingerprint module not available"}, indent=2)

        dup_groups = get_fingerprint_db().find_duplicates()

        payload = {
            "total_duplicates": len(dup_groups),
            "groups": [{"hash": idx, "files": files} for idx, files in enumerate(dup_groups)],
            "action": "Consider removing duplicates to save space"
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def wildcard_search_samples(ctx: Context, category: str) -> str:
    """
    T033-T034: Wildcard search by category.

    Args:
        category: Wildcard category (any_drum, any_bass, any_synth, any_vocal, any_fx)

    Returns:
        JSON with the search patterns for the category.
    """
    try:
        if WildCardMatcher is None:
            return json.dumps({"error": "WildCardMatcher not available"}, indent=2)

        matched = WildCardMatcher.get_wildcard_query(category)

        return json.dumps({
            "category": category,
            "patterns": matched,
            "description": f"Use these patterns to search for {category} samples"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_section_roles(ctx: Context, section_kind: str) -> str:
    """
    T035-T037: Return the recommended roles for a section.

    Args:
        section_kind: Section type (intro, build, drop, break, outro)

    Returns:
        JSON with primary, secondary and avoid roles.
    """
    try:
        if SectionCastingEngine is None:
            return json.dumps({"error": "SectionCastingEngine not available"}, indent=2)

        casting = SectionCastingEngine()
        role_map = casting.get_roles_for_section(section_kind)

        return json.dumps({
            "section": section_kind,
            "roles": role_map,
            "recommendation": f"Use primary roles for {section_kind}, avoid 'avoid' roles"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# T101-T104: BUS ROUTING SYSTEM FIX TOOLS
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def diagnose_bus_routing(ctx: Context) -> str:
    """
    T102: Diagnose bus-routing problems.

    Detects:
    - Tracks routed to the wrong bus
    - Excessive sends on kicks/bass
    - FX bypassing the master

    Returns:
        JSON with the problems found.
    """
    try:
        if get_routing_fixer is None:
            return json.dumps({"error": "bus_routing_fix module not available"}, indent=2)

        tracks_response = _send_command_to_ableton({
            "command": "get_all_tracks"
        })

        if not (isinstance(tracks_response, dict) and tracks_response.get("status") == "ok"):
            return json.dumps({"error": "Could not get tracks from Ableton"}, indent=2)

        issues = get_routing_fixer().diagnose_routing(tracks_response.get("tracks", []))
        severities = [i.get('severity') for i in issues]

        return json.dumps({
            "issues_found": len(issues),
            "critical": severities.count('high'),
            "warnings": sum(1 for s in severities if s in ['medium', 'low']),
            "issues": issues,
            "recommendation": "Use fix_bus_routing() to apply fixes"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_bus_routing_config(ctx: Context) -> str:
    """
    T101: Return the full bus-routing configuration.

    Shows the RCA bus setup and the role-to-bus mappings.

    Returns:
        JSON with the bus configuration.
    """
    try:
        if get_routing_fixer is None:
            return json.dumps({"error": "bus_routing_fix module not available"}, indent=2)

        routing = get_routing_fixer().get_bus_routing_config()
        return json.dumps(routing, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_bus_for_role(ctx: Context, role: str) -> str:
    """
    T101: Return the appropriate RCA bus for a role.

    Args:
        role: Sample role (kick, bass, vocal, etc.)

    Returns:
        JSON with the recommended bus plus the full bus list.
    """
    try:
        if BusRoutingRules is None:
            return json.dumps({"error": "BusRoutingRules not available"}, indent=2)

        payload = {
            "role": role,
            "recommended_bus": BusRoutingRules.get_bus_for_role(role),
            "all_buses": BusRoutingRules.RCA_BUSES
        }
        return json.dumps(payload, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# T105-T106: VALIDATION SYSTEM FIX TOOLS
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def validate_set_detailed(ctx: Context, check_clips: bool = True,
                          check_keys: bool = True, check_gain: bool = True) -> str:
    """
    T105-T106: Detailed validation of the set.

    Detects:
    - Empty or corrupt clips
    - Severe key conflicts
    - Duplicated samples
    - Gain-staging problems

    Args:
        check_clips: Validate clips
        check_keys: Validate harmonic keys
        check_gain: Validate gain levels

    Returns:
        JSON with the full validation report.
    """
    # NOTE(review): the check_* flags are accepted for interface stability
    # but are not currently forwarded to run_full_validation — confirm.
    try:
        if get_validation_fixer is None:
            return json.dumps({"error": "validation_system_fix module not available"}, indent=2)

        set_response = _send_command_to_ableton({
            "command": "get_set_info"
        })

        if not (isinstance(set_response, dict) and set_response.get("status") == "ok"):
            return json.dumps({"error": "Could not get set info from Ableton"}, indent=2)

        set_data = set_response.get("data", {})

        # Fetch the track list separately when the set info omits it.
        if "tracks" not in set_data:
            tracks_response = _send_command_to_ableton({
                "command": "get_all_tracks"
            })
            if isinstance(tracks_response, dict):
                set_data["tracks"] = tracks_response.get("tracks", [])

        report = get_validation_fixer().run_full_validation(set_data)
        return json.dumps(report, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def validate_key_conflicts(ctx: Context, target_key: str = "") -> str:
    """
    T106: Validate harmonic conflicts against a target key.

    Args:
        target_key: Target key (e.g. "F#m", "Am"). Empty = use the set's key.

    Returns:
        JSON with the detected conflicts.
    """
    try:
        if get_validation_fixer is None:
            return json.dumps({"error": "validation_system_fix module not available"}, indent=2)

        # Resolve the target key from the live set when not provided.
        if not target_key:
            set_response = _send_command_to_ableton({
                "command": "get_set_info"
            })
            if isinstance(set_response, dict):
                target_key = set_response.get("key", "Am")

        tracks_response = _send_command_to_ableton({
            "command": "get_all_tracks"
        })

        if not (isinstance(tracks_response, dict) and tracks_response.get("status") == "ok"):
            return json.dumps({"error": "Could not get tracks from Ableton"}, indent=2)

        issues = get_validation_fixer().validate_key_conflicts(
            tracks_response.get("tracks", []), target_key)

        issue_dicts = [
            {
                "type": i.type,
                "track": i.track,
                "message": i.message,
                "suggestion": i.suggestion
            }
            for i in issues
        ]

        return json.dumps({
            "target_key": target_key,
            "conflicts_found": len(issues),
            "severe_conflicts": sum(1 for i in issues if i.severity == 'error'),
            "warnings": sum(1 for i in issues if i.severity == 'warning'),
            "issues": issue_dicts
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# FASE 5: DJ ARRANGEMENT ADVANCED TOOLS (T067, T072-T077)
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def set_loop_markers(ctx: Context, position_bar: int = 0,
                     length_bars: int = 16,
                     name: str = "Drop Loop") -> str:
    """
    T067: Place loop markers at key points of the song.

    Args:
        position_bar: Bar at which the loop starts.
        length_bars: Loop length in bars (default 16 = one drop).
        name: Descriptive loop name (e.g. "Drop 1", "Break", "Intro").

    Creates loop markers in Arrangement View to ease DJ navigation.
    """
    try:
        conn = get_ableton_connection()

        end_bar = position_bar + length_bars

        # Colour-code the marker from the loop name: drops red, breaks
        # blue, everything else yellow.
        lowered = name.lower()
        if "drop" in lowered:
            marker_color = "red"
        elif "break" in lowered:
            marker_color = "blue"
        else:
            marker_color = "yellow"

        result = conn.send_command("set_loop_markers", {
            "start_bar": position_bar,
            "end_bar": end_bar,
            "name": name,
            "color": marker_color
        })

        return json.dumps({
            "status": "success",
            "action": "set_loop_markers",
            "loop_name": name,
            "start_bar": position_bar,
            "end_bar": end_bar,
            "length_bars": length_bars,
            "result": result,
            "note": "Loop marcado para navegación DJ - shift+tab para saltar"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_filter_sweep(ctx: Context, track_index: int,
                       section_start_bar: int,
                       section_end_bar: int,
                       sweep_type: str = "highpass_up") -> str:
    """
    T072: Apply filter-sweep automation across a transition.

    Args:
        track_index: Target track (usually bass or music)
        section_start_bar: Bar where the transition starts
        section_end_bar: Bar where the transition ends (the drop)
        sweep_type: 'highpass_up' (filter rises) or 'lowpass_down' (filter falls)

    Example: a high-pass climbs over the 8 bars before the drop, then snaps.
    """
    try:
        conn = get_ableton_connection()

        duration = section_end_bar - section_start_bar

        # Each profile pairs the filter type with its breakpoint curve.
        sweep_profiles = {
            "highpass_up": (
                "high_pass",
                [
                    # High-pass rises from 20Hz to 800Hz.
                    {"time": 0, "value": 0.0, "bar": section_start_bar},
                    {"time": duration * 0.7, "value": 0.3, "bar": section_start_bar + duration * 0.7},
                    {"time": duration, "value": 0.8, "bar": section_end_bar}
                ],
            ),
            "lowpass_down": (
                "low_pass",
                [
                    # Low-pass falls from 20kHz to 800Hz.
                    {"time": 0, "value": 1.0, "bar": section_start_bar},
                    {"time": duration * 0.7, "value": 0.6, "bar": section_start_bar + duration * 0.7},
                    {"time": duration, "value": 0.2, "bar": section_end_bar}
                ],
            ),
        }

        if sweep_type not in sweep_profiles:
            return json.dumps({"error": f"Unknown sweep_type: {sweep_type}"}, indent=2)

        filter_type, points = sweep_profiles[sweep_type]

        result = conn.send_command("write_filter_automation", {
            "track_index": track_index,
            "filter_type": filter_type,
            "points": points,
            "section": f"{section_start_bar}-{section_end_bar}"
        })

        return json.dumps({
            "status": "success",
            "action": "apply_filter_sweep",
            "track_index": track_index,
            "sweep_type": sweep_type,
            "filter_type": filter_type,
            "start_bar": section_start_bar,
            "end_bar": section_end_bar,
            "duration_bars": duration,
            "automation_points": len(points),
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_reverb_tail_automation(ctx: Context, track_index: int,
                                 section_start_bar: int,
                                 section_end_bar: int) -> str:
    """
    T073: Apply reverb-tail automation across a break.

    Args:
        track_index: Target track (atmos, pad, vocals)
        section_start_bar: Bar where the break starts
        section_end_bar: Bar where the break ends (return to the drop)

    Pattern: reverb 0% -> 40% -> 0% to open up space during breaks.
    """
    try:
        conn = get_ableton_connection()

        duration = section_end_bar - section_start_bar

        # Breakpoints as (time, wet, bar): dry at both edges, 40% wet held
        # through the middle of the break, back to dry before the drop.
        curve = [
            (0, 0.0, section_start_bar),
            (duration * 0.4, 0.4, section_start_bar + duration * 0.4),
            (duration * 0.8, 0.4, section_start_bar + duration * 0.8),
            (duration, 0.0, section_end_bar),
        ]
        points = [{"time": t, "value": wet, "bar": bar} for t, wet, bar in curve]

        result = conn.send_command("write_reverb_automation", {
            "track_index": track_index,
            "parameter": "reverb_wet",
            "points": points
        })

        return json.dumps({
            "status": "success",
            "action": "apply_reverb_tail_automation",
            "track_index": track_index,
            "start_bar": section_start_bar,
            "end_bar": section_end_bar,
            "max_reverb": 0.4,
            "pattern": "0% -> 40% -> 0%",
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_pitch_riser(ctx: Context, track_index: int,
                      start_bar: int,
                      end_bar: int,
                      start_semitones: float = 0.0,
                      end_semitones: float = 12.0) -> str:
    """
    T074: Apply riser-style pitch automation.

    Args:
        track_index: Target track (synth, atmos, noise)
        start_bar: Bar where the riser starts
        end_bar: Bar where the riser ends (the drop beat)
        start_semitones: Initial pitch offset (default 0)
        end_semitones: Final pitch offset (default +12 = one octave up)

    Writes an exponential pitch ramp to build tension into the drop.
    """
    try:
        conn = get_ableton_connection()

        duration = end_bar - start_bar
        # Robustness: a riser needs a positive span; reject reversed or
        # zero-length ranges instead of writing degenerate automation.
        if duration <= 0:
            return json.dumps({"error": f"end_bar ({end_bar}) must be greater than start_bar ({start_bar})"}, indent=2)

        # Exponential pitch curve: t ** 1.5 concentrates the movement near
        # the end of the ramp for maximum tension right before the drop.
        num_points = 10
        points = []
        for i in range(num_points + 1):
            t = i / num_points
            pitch = start_semitones + (end_semitones - start_semitones) * (t ** 1.5)
            points.append({
                "time": t * duration,
                "value": pitch,
                "bar": start_bar + t * duration
            })

        result = conn.send_command("write_pitch_automation", {
            "track_index": track_index,
            "points": points,
            "snap_to": start_semitones  # Snap back to the original pitch after the drop
        })

        # BUGFIX: the original used the integer-only format spec "+d", which
        # raises ValueError for float semitone values (the defaults are
        # floats) — so the tool reported an error even though the automation
        # had already been written. "+g" formats ints and floats alike.
        return json.dumps({
            "status": "success",
            "action": "apply_pitch_riser",
            "track_index": track_index,
            "start_bar": start_bar,
            "end_bar": end_bar,
            "pitch_range": f"{start_semitones:+g} -> {end_semitones:+g} semitones",
            "automation_points": len(points),
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_micro_timing_push(ctx: Context, track_index: int,
                            kick_offset_ms: float = -5.0,
                            bass_offset_ms: float = 8.0,
                            apply_to_clips: bool = True) -> str:
    """
    T075: Apply a micro-timing "push" for an organic groove.

    Args:
        track_index: Target track (or -1 for the whole rhythm section)
        kick_offset_ms: Kick offset (-5ms = pushed ahead of the grid)
        bass_offset_ms: Bass offset (+8ms = laid back, after the kick)
        apply_to_clips: Apply to existing clips (currently informational)

    Technique: kick -5ms (push), bass +8ms (lay back) for an organic,
    hardware-style feel.
    """
    try:
        conn = get_ableton_connection()

        if track_index == -1:
            # Apply across the whole rhythm section.
            tracks_response = conn.send_command("get_all_tracks")
            tracks = tracks_response.get("tracks", []) if isinstance(tracks_response, dict) else []

            # BUGFIX: the original re-resolved each track's name through
            # `tracks[idx]`, assuming the Ableton track "index" field always
            # matches the list position (wrong track or IndexError when it
            # does not), and it never applied bass_offset_ms despite the
            # docstring. Collect (index, delay) pairs in a single pass.
            targets = []
            for t in tracks:
                idx = t.get("index")
                if idx is None:
                    continue
                name = t.get("name", "").lower()
                if "kick" in name:
                    targets.append((idx, kick_offset_ms))
                elif "bass" in name:
                    targets.append((idx, bass_offset_ms))
                elif any(x in name for x in ["drum", "perc"]):
                    # Other percussion stays on the grid.
                    targets.append((idx, 0.0))

            results = []
            for idx, delay in targets:
                result = conn.send_command("apply_track_delay", {
                    "track_index": idx,
                    "delay_ms": delay
                })
                results.append({"track": idx, "result": result})

            return json.dumps({
                "status": "success",
                "action": "apply_micro_timing_push",
                "mode": "all_drums",
                "drum_tracks_affected": len(targets),
                "kick_offset_ms": kick_offset_ms,
                "bass_offset_ms": bass_offset_ms,
                "results": results,
                "note": "Kick adelantado -5ms, bass retrasado +8ms, otros al tiempo"
            }, indent=2)
        else:
            # Single-track mode: apply the kick offset to the given track.
            result = conn.send_command("apply_track_delay", {
                "track_index": track_index,
                "delay_ms": kick_offset_ms
            })

            return json.dumps({
                "status": "success",
                "action": "apply_micro_timing_push",
                "track_index": track_index,
                "delay_ms": kick_offset_ms,
                "result": result
            }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def apply_groove_template(ctx: Context, section: str,
                          template_name: str = "tech_house_drop") -> str:
    """
    T077: Apply a groove template per section and subgenre.

    Args:
        section: Section to process (intro, build, drop, break, outro)
        template_name: Template name:
            - 'tech_house_drop': Tight groove, pronounced sidechain
            - 'tech_house_break': More swing, spaced out
            - 'deep_house_drop': Loose groove, soft shuffle
            - 'techno_minimal': Precise, almost straight

    Applies the predefined groove to every clip in the section.
    Unknown template names silently fall back to 'tech_house_drop'.
    """
    try:
        # BUGFIX: removed an unused `from audio_arrangement import
        # DJArrangementEngine` that created a hard module dependency
        # without ever being referenced.

        # Groove settings per template.
        GROOVE_TEMPLATES = {
            "tech_house_drop": {
                "swing": 0.14,
                "timing_variation_ms": 3.0,
                "velocity_variance": 0.08,
                "description": "Tight groove, strong sidechain"
            },
            "tech_house_break": {
                "swing": 0.18,
                "timing_variation_ms": 6.0,
                "velocity_variance": 0.12,
                "description": "Loose groove, more space"
            },
            "deep_house_drop": {
                "swing": 0.20,
                "timing_variation_ms": 8.0,
                "velocity_variance": 0.10,
                "description": "Laid-back shuffle feel"
            },
            "techno_minimal": {
                "swing": 0.08,
                "timing_variation_ms": 2.0,
                "velocity_variance": 0.05,
                "description": "Precise, straight timing"
            }
        }

        template = GROOVE_TEMPLATES.get(template_name, GROOVE_TEMPLATES["tech_house_drop"])

        conn = get_ableton_connection()

        # Apply the groove to every clip in the requested section.
        result = conn.send_command("apply_groove_to_section", {
            "section": section,
            "swing": template["swing"],
            "humanize": True,
            "timing_variation_ms": template["timing_variation_ms"],
            "velocity_variance": template["velocity_variance"]
        })

        return json.dumps({
            "status": "success",
            "action": "apply_groove_template",
            "section": section,
            "template": template_name,
            "template_description": template["description"],
            "swing": template["swing"],
            "timing_variation_ms": template["timing_variation_ms"],
            "velocity_variance": template["velocity_variance"],
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def inject_transition_fx_detailed(ctx: Context, fx_type: str,
                                  position_bar: int,
                                  intensity: str = "medium") -> str:
    """
    T071-T077: Inject advanced transition FX (riser, crash, snare_roll, noise_sweep).

    Args:
        fx_type: FX kind ('riser', 'crash', 'snare_roll', 'noise_sweep', 'reverse')
        position_bar: Bar position where the FX clip is placed
        intensity: 'subtle', 'medium', 'heavy'

    Enhanced variant of inject_transition_fx with more options.
    """
    try:
        conn = get_ableton_connection()

        # Clip length (in bars) by FX kind and intensity; unknown
        # combinations fall back to 4 bars.
        DURATIONS = {
            "riser": {"subtle": 4, "medium": 8, "heavy": 16},
            "crash": {"subtle": 1, "medium": 2, "heavy": 4},
            "snare_roll": {"subtle": 2, "medium": 4, "heavy": 8},
            "noise_sweep": {"subtle": 4, "medium": 8, "heavy": 16},
            "reverse": {"subtle": 2, "medium": 4, "heavy": 8}
        }
        duration = DURATIONS.get(fx_type, {}).get(intensity, 4)

        # Sustained FX types get an automatic volume rise.
        wants_automation = fx_type in ["riser", "noise_sweep"]

        result = conn.send_command("create_fx_clip", {
            "fx_type": fx_type,
            "position_bar": position_bar,
            "duration": duration,
            "intensity": intensity,
            "automation": wants_automation
        })

        return json.dumps({
            "status": "success",
            "action": "inject_transition_fx_detailed",
            "fx_type": fx_type,
            "position_bar": position_bar,
            "intensity": intensity,
            "duration_bars": duration,
            "result": result
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# FASE 7: SELF-AI & LEARNING TOOLS (T091-T100)
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def rate_generation(ctx: Context, session_id: str,
                    score: int,
                    notes: str = "") -> str:
    """
    T091: Rating system for generations.

    Args:
        session_id: Session/generation id (from the manifest)
        score: Rating 1-5 (5 = excellent, 1 = bad)
        notes: Optional notes about what worked / did not work

    Stores the rating for the feedback loop and preference analysis.
    """
    try:
        from datetime import datetime
        # BUGFIX: dropped an unused `import os` that was here before.

        # Robustness: reject out-of-range scores instead of silently
        # storing them (the docstring promises a 1-5 scale).
        if not 1 <= score <= 5:
            return json.dumps({"error": f"score must be between 1 and 5, got {score}"}, indent=2)

        # Assemble the rating record, snapshotting the current manifest.
        rating_data = {
            "session_id": session_id,
            "score": score,
            "notes": notes,
            "timestamp": datetime.now().isoformat(),
            "manifest": _get_stored_manifest()
        }

        # Persist to the ratings file in the user's home directory.
        ratings_path = Path.home() / ".abletonmcp_ai" / "generation_ratings.json"
        # Robustness: make sure the storage directory exists on first use.
        ratings_path.parent.mkdir(parents=True, exist_ok=True)

        ratings = []
        if ratings_path.exists():
            with open(ratings_path, 'r') as f:
                ratings = json.load(f)

        ratings.append(rating_data)

        with open(ratings_path, 'w') as f:
            json.dump(ratings, f, indent=2)

        # Good ratings feed back into sample fatigue so the samples used
        # in this generation become eligible for reuse sooner.
        if score >= 4:
            _adjust_fatigue_for_good_rating(session_id)

        return json.dumps({
            "status": "success",
            "action": "rate_generation",
            "session_id": session_id,
            "score": score,
            "notes": notes,
            "total_ratings": len(ratings),
            "feedback_loop": "Activado" if score >= 4 else "Neutral"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
def _adjust_fatigue_for_good_rating(session_id: str):
    """Decrement fatigue counters for samples used in well-rated generations."""
    global _sample_fatigue

    blueprint = _get_stored_manifest().get("tracks_blueprint", [])
    for track in blueprint:
        for path in track.get("sample_paths", []):
            fatigue_entry = _sample_fatigue.get(path)
            if fatigue_entry is None:
                continue
            # Knock one use off every role for this sample, never below zero.
            for data in fatigue_entry.values():
                if data.get("uses", 0) > 0:
                    data["uses"] = max(0, data["uses"] - 1)
|
|
|
|
|
|
@mcp.tool()
def get_generation_stats(ctx: Context, last_n: int = 20) -> str:
    """
    T093-T094: Get statistics over past generations.

    Args:
        last_n: Number of most recent rated generations to analyze (default 20)

    Returns a JSON trend analysis: mean score, palette preferences grouped
    by BPM decade and by key, and top-performing keys/BPM ranges.
    """
    try:
        ratings_path = Path.home() / ".abletonmcp_ai" / "generation_ratings.json"

        if not ratings_path.exists():
            return json.dumps({
                "status": "no_data",
                "message": "No ratings found. Use rate_generation() first."
            }, indent=2)

        with open(ratings_path, 'r') as f:
            ratings = json.load(f)

        # Only the last N rated generations are analyzed.
        recent = ratings[-last_n:]

        # Overall mean score (0 when there is no data at all).
        avg_score = sum(r["score"] for r in recent) / len(recent) if recent else 0

        # Per-BPM-decade and per-key accumulators. NOTE: "avg_score" holds
        # the raw score *sum* during accumulation and is only turned into a
        # mean in the normalization loops further below.
        bpm_preferences = {}
        key_preferences = {}

        for r in recent:
            manifest = r.get("manifest", {})
            bpm = manifest.get("bpm", 0)
            key = manifest.get("key", "unknown")
            palette = manifest.get("palette", {})

            if bpm > 0:
                # Bucket BPM into decades, e.g. 124 -> "120-129".
                bpm_range = f"{int(bpm/10)*10}-{int(bpm/10)*10+9}"
                if bpm_range not in bpm_preferences:
                    bpm_preferences[bpm_range] = {"count": 0, "avg_score": 0, "palettes": []}
                bpm_preferences[bpm_range]["count"] += 1
                bpm_preferences[bpm_range]["avg_score"] += r["score"]
                # NOTE(review): the collected palettes are never read back
                # below — apparently kept for future analysis; confirm.
                bpm_preferences[bpm_range]["palettes"].append(palette)

            if key not in key_preferences:
                key_preferences[key] = {"count": 0, "avg_score": 0}
            key_preferences[key]["count"] += 1
            key_preferences[key]["avg_score"] += r["score"]

        # Normalize the accumulated score sums into rounded means.
        for bp in bpm_preferences.values():
            if bp["count"] > 0:
                bp["avg_score"] = round(bp["avg_score"] / bp["count"], 2)

        for kp in key_preferences.values():
            if kp["count"] > 0:
                kp["avg_score"] = round(kp["avg_score"] / kp["count"], 2)

        # Highest-scoring keys (top 5) and BPM decades (top 3).
        top_keys = sorted(key_preferences.items(), key=lambda x: x[1]["avg_score"], reverse=True)[:5]
        top_bpms = sorted(bpm_preferences.items(), key=lambda x: x[1]["avg_score"], reverse=True)[:3]

        return json.dumps({
            "status": "success",
            "action": "get_generation_stats",
            "generations_analyzed": len(recent),
            "average_score": round(avg_score, 2),
            "top_performing_keys": [
                {"key": k, "score": v["avg_score"], "count": v["count"]} for k, v in top_keys
            ],
            "top_performing_bpm_ranges": [
                {"range": b, "score": v["avg_score"], "count": v["count"]} for b, v in top_bpms
            ],
            "prediction_confidence": "high" if len(recent) >= 10 else "medium" if len(recent) >= 5 else "low",
            "recommendation": f"Try keys: {', '.join(k for k, _ in top_keys[:3])} with BPM ranges: {', '.join(b for b, _ in top_bpms[:2])}"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def generate_dj_set(ctx: Context, duration_hours: float = 1.0,
                    style_evolution: str = "progressive") -> str:
    """
    T096: Generate a complete DJ set of N hours.

    Args:
        duration_hours: Set length (0.5 - 4.0 hours)
        style_evolution: How the set evolves:
            - 'progressive': from deep to peak time
            - 'peak_time': high energy throughout
            - 'warmup': soft start, gradual build

    Generates multiple connected tracks that share a locked palette.
    """
    try:
        # Estimate how many ~6-minute tracks fit the requested duration.
        track_duration_min = 6
        num_tracks = int((duration_hours * 60) / track_duration_min) + 1

        # Style progression per evolution mode.
        evolution_config = {
            "progressive": ["deep_house", "tech_house", "techno_peak"],
            "peak_time": ["tech_house", "techno_peak", "techno_industrial"],
            "warmup": ["deep_house", "deep_tech", "tech_house"]
        }

        styles = evolution_config.get(style_evolution, evolution_config["progressive"])

        # NOTE(review): only len(styles) (i.e. 3) track blueprints are built
        # below, while the duration-derived `num_tracks` is what gets
        # reported in the response — the two can disagree; confirm which is
        # the intended contract.
        generator = get_song_generator()
        generated_tracks = []
        shared_palette = None

        base_bpm = 124
        base_key = "Am"

        for i, style in enumerate(styles):
            # BPM progression: +2 BPM per track.
            bpm = base_bpm + (i * 2)

            # Key progression along the circle of fifths.
            from audio_key_compatibility import get_key_matrix
            if i > 0:
                base_key = get_key_matrix().suggest_key_change(base_key, "fifth_up") or base_key

            # First track anchors the palette; subsequent tracks reuse it
            # so the whole set stays coherent.
            palette = _select_anchor_folders(style, base_key, bpm) if i == 0 else shared_palette
            if i == 0:
                shared_palette = palette

            # NOTE(review): the returned config is not used afterwards —
            # presumably generate_config registers state internally; verify.
            config = generator.generate_config(
                genre=style.replace("_peak", "").replace("_industrial", ""),
                style=style,
                bpm=bpm,
                key=base_key,
                structure="standard",
                palette=palette
            )

            generated_tracks.append({
                "track_number": i + 1,
                "style": style,
                "bpm": bpm,
                "key": base_key,
                "palette_linked": i > 0,
                "estimated_duration_min": track_duration_min
            })

        return json.dumps({
            "status": "success",
            "action": "generate_dj_set",
            "duration_hours": duration_hours,
            "style_evolution": style_evolution,
            "num_tracks": num_tracks,
            "tracks": generated_tracks,
            "total_estimated_duration_min": num_tracks * track_duration_min,
            "palette_shared": shared_palette,
            "note": "Tracks designed to mix seamlessly with shared palette"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def analyze_trends_library(ctx: Context, min_generations: int = 10) -> str:
    """
    T097-T099: Analyze library trends and success characteristics.

    Args:
        min_generations: Minimum number of rated generations required

    Beatport-style analysis: identifies "hot zones" (keys, BPM ranges,
    palette folders) shared by the best-rated generations.
    """
    try:
        ratings_path = Path.home() / ".abletonmcp_ai" / "generation_ratings.json"

        if not ratings_path.exists():
            return json.dumps({
                "status": "insufficient_data",
                "message": f"Need at least {min_generations} rated generations"
            }, indent=2)

        with open(ratings_path, 'r') as f:
            ratings = json.load(f)

        if len(ratings) < min_generations:
            return json.dumps({
                "status": "insufficient_data",
                "generations_rated": len(ratings),
                "required": min_generations
            }, indent=2)

        # Keep only well-rated generations (4-5 stars).
        good_ratings = [r for r in ratings if r["score"] >= 4]

        if len(good_ratings) < 5:
            return json.dumps({
                "status": "insufficient_good_ratings",
                "good_ratings": len(good_ratings),
                "needed": 5
            }, indent=2)

        # Frequency counters for shared characteristics.
        common_keys = {}
        common_bpms = {}
        common_palettes = {}
        # NOTE(review): spectral_profiles is initialized but never updated
        # or reported below — apparently dead; confirm before removing.
        spectral_profiles = {"bright": 0, "warm": 0, "dark": 0}

        for r in good_ratings:
            manifest = r.get("manifest", {})

            # Musical key frequency.
            key = manifest.get("key", "unknown")
            common_keys[key] = common_keys.get(key, 0) + 1

            # BPM frequency, grouped into ranges of 5.
            bpm = manifest.get("bpm", 0)
            if bpm > 0:
                bpm_range = int(bpm / 5) * 5
                common_bpms[bpm_range] = common_bpms.get(bpm_range, 0) + 1

            # Palette "bus:folder" pair frequency. NOTE: this reuses the
            # local name `key` after the musical key has been counted.
            palette = manifest.get("palette", {})
            for bus, folder in palette.items():
                key = f"{bus}:{folder}"
                common_palettes[key] = common_palettes.get(key, 0) + 1

        # Hot zones: most frequent keys, BPM ranges, and palette folders.
        hot_keys = sorted(common_keys.items(), key=lambda x: x[1], reverse=True)[:3]
        hot_bpms = sorted(common_bpms.items(), key=lambda x: x[1], reverse=True)[:3]
        hot_palettes = sorted(common_palettes.items(), key=lambda x: x[1], reverse=True)[:5]

        return json.dumps({
            "status": "success",
            "action": "analyze_trends_library",
            "generations_analyzed": len(good_ratings),
            "hot_zones": {
                "keys": [{"key": k, "count": v} for k, v in hot_keys],
                "bpm_ranges": [{"bpm_range": f"{b}-{b+4}", "count": v} for b, v in hot_bpms],
                "palette_folders": [{"folder": p.split(':')[1], "bus": p.split(':')[0], "count": v} for p, v in hot_palettes]
            },
            "trend_summary": f"Hot: Keys {[k for k,_ in hot_keys]}, BPMs {[b for b,_ in hot_bpms]}",
            "recommendation": "Focus on these characteristics for next generation"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def auto_improve_set(ctx: Context, session_id: str,
                     low_score_threshold: int = 3) -> str:
    """
    T100: Auto-improve a set by regenerating low-scoring sections.

    Args:
        session_id: Session to improve
        low_score_threshold: Minimum acceptable score (default 3)

    Regenerates the problematic sections while leaving the well-rated
    ones untouched.
    """
    try:
        ratings_path = Path.home() / ".abletonmcp_ai" / "generation_ratings.json"

        if not ratings_path.exists():
            return json.dumps({"error": "No ratings database found"}, indent=2)

        with open(ratings_path, 'r') as f:
            ratings = json.load(f)

        # Locate the rating record for the requested session.
        session_rating = next(
            (r for r in ratings if r.get("session_id") == session_id), None
        )

        if not session_rating:
            return json.dumps({"error": f"Session {session_id} not found"}, indent=2)

        score = session_rating.get("score", 0)

        if score >= low_score_threshold:
            return json.dumps({
                "status": "no_action_needed",
                "session_id": session_id,
                "score": score,
                "message": "Score is acceptable, no regeneration needed"
            }, indent=2)

        # Mine the reviewer notes for recognizable problem keywords.
        notes = session_rating.get("notes", "").lower()
        manifest = session_rating.get("manifest", {})

        improvement_plan = {
            "session_id": session_id,
            "original_score": score,
            "issues_identified": [],
            "regeneration_strategy": {}
        }
        issues = improvement_plan["issues_identified"]
        strategy = improvement_plan["regeneration_strategy"]

        # Common complaint patterns -> regeneration strategies.
        if "kick" in notes or "bass" in notes:
            issues.append("drums_bass")
            strategy["drums"] = "select_new_samples"

        if "key" in notes or "disonante" in notes or "clash" in notes:
            issues.append("key_compatibility")
            strategy["harmonic"] = "enforce_key_matching"

        if "boring" in notes or "repetitive" in notes:
            issues.append("variation")
            strategy["fills"] = "increase_density"

        # Nothing recognizable: fall back to a full fresh generation.
        if not issues:
            strategy["general"] = "fresh_generation"

        return json.dumps({
            "status": "success",
            "action": "auto_improve_set",
            "session_id": session_id,
            "improvement_plan": improvement_plan,
            "recommendation": "Regenerate with strategy: " + str(strategy),
            "next_step": "Use generate_song() with improved parameters"
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# INFRASTRUCTURA: DASHBOARD & METRICS TOOLS (T108)
|
|
# ============================================================================
|
|
|
|
@mcp.tool()
def get_system_metrics(ctx: Context) -> str:
    """
    T108: System metrics dashboard.

    Returns complete metrics as JSON:
    - total rated generations and their mean score
    - library coverage percentage
    - session diversity counters (sample fatigue, families used)
    - overall system health score/state
    """
    try:
        # BUGFIX: dropped an unused local `import os`.
        from pathlib import Path

        metrics = {
            "system_health": "healthy",
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "generations": {},
            "coverage": {},
            # NOTE(review): "ratings" is never populated below — confirm
            # whether a consumer expects it or it can be dropped.
            "ratings": {},
            "library": {},
            "performance": {}
        }

        # 1. Total rated generations and their mean score.
        ratings_path = Path.home() / ".abletonmcp_ai" / "generation_ratings.json"
        if ratings_path.exists():
            with open(ratings_path, 'r') as f:
                ratings = json.load(f)
            metrics["generations"]["total_rated"] = len(ratings)
            metrics["generations"]["average_score"] = round(
                sum(r["score"] for r in ratings) / len(ratings), 2
            ) if ratings else 0
        else:
            metrics["generations"]["total_rated"] = 0
            metrics["generations"]["average_score"] = 0

        # 2. Library coverage: share of folders that have been used at least once.
        coverage_path = Path.home() / ".abletonmcp_ai" / "collection_coverage.json"
        if coverage_path.exists():
            with open(coverage_path, 'r') as f:
                coverage = json.load(f)
            total_folders = len(coverage)
            used_folders = len([f for f in coverage.values() if f.get("uses", 0) > 0])
            metrics["coverage"]["total_folders"] = total_folders
            metrics["coverage"]["used_folders"] = used_folders
            metrics["coverage"]["percentage"] = round(
                (used_folders / total_folders * 100), 2
            ) if total_folders > 0 else 0
        else:
            metrics["coverage"]["percentage"] = 0

        # 3. Sample fatigue: how many samples carry usage counters.
        global _sample_fatigue
        metrics["library"]["samples_in_fatigue"] = len(_sample_fatigue)

        # 4. Diversity of the current session (families / sample paths used).
        from song_generator import get_cross_generation_state
        families, paths = get_cross_generation_state()
        metrics["library"]["families_used_session"] = len(families)
        metrics["library"]["samples_used_session"] = len(paths)

        # 5. Performance — placeholder until real latency logging exists.
        metrics["performance"]["status"] = "nominal"

        # 6. Aggregate health score: start at 100 and deduct per weak area.
        health_score = 100
        if metrics["coverage"]["percentage"] < 50:
            health_score -= 20
        if metrics["generations"]["average_score"] < 3.0:
            health_score -= 20
        if metrics["library"]["samples_in_fatigue"] < 10:
            health_score -= 10

        metrics["system_health_score"] = health_score
        metrics["system_health"] = "healthy" if health_score >= 80 else "degraded" if health_score >= 60 else "critical"

        return json.dumps({
            "status": "success",
            "action": "get_system_metrics",
            "dashboard": metrics,
            "summary": {
                "total_generations": metrics["generations"]["total_rated"],
                "avg_rating": metrics["generations"]["average_score"],
                "library_coverage": f"{metrics['coverage']['percentage']}%",
                "health": metrics["system_health"],
                "health_score": health_score
            }
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def get_generation_history(ctx: Context, limit: int = 10) -> str:
    """
    Return the history of recent generations.

    Args:
        limit: Number of generations to return (default 10)
    """
    try:
        ratings_path = Path.home() / ".abletonmcp_ai" / "generation_ratings.json"

        if not ratings_path.exists():
            return json.dumps({
                "status": "no_data",
                "history": []
            }, indent=2)

        with open(ratings_path, 'r') as f:
            ratings = json.load(f)

        # Newest entries first.
        newest_first = sorted(ratings, key=lambda x: x.get("timestamp", ""), reverse=True)

        # Trim each entry to a compact summary so the payload stays small.
        summary = []
        for entry in newest_first[:limit]:
            manifest = entry.get("manifest", {})
            notes = entry.get("notes", "")
            preview = notes[:50] + "..." if len(notes) > 50 else notes
            summary.append({
                "session_id": entry.get("session_id", "unknown"),
                "timestamp": entry.get("timestamp", ""),
                "score": entry.get("score", 0),
                "genre": manifest.get("genre", "unknown"),
                "bpm": manifest.get("bpm", 0),
                "key": manifest.get("key", "unknown"),
                "notes_preview": preview
            })

        return json.dumps({
            "status": "success",
            "total_generations": len(ratings),
            "showing": len(summary),
            "history": summary
        }, indent=2)
    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
@mcp.tool()
def export_system_report(ctx: Context, format: str = "json") -> str:
    """
    T108: Export a full system report for external analysis.

    Args:
        format: Export format ('json', 'csv', 'markdown')

    Returns the complete metrics report in the requested format.
    """
    try:
        # Gather the live metrics dashboard.
        metrics_response = get_system_metrics(ctx)
        metrics_data = json.loads(metrics_response)

        if format == "json":
            return json.dumps({
                "status": "success",
                "format": "json",
                "report": metrics_data,
                "export_timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
            }, indent=2)

        elif format == "csv":
            # BUGFIX: 'csv' was documented but not implemented — it fell
            # through to the unsupported-format error. Export the summary
            # metrics as simple metric,value rows.
            import csv
            import io

            summary = metrics_data.get("summary", {})
            buf = io.StringIO()
            writer = csv.writer(buf)
            writer.writerow(["metric", "value"])
            for metric_name, value in summary.items():
                writer.writerow([metric_name, value])

            return json.dumps({
                "status": "success",
                "format": "csv",
                "report": buf.getvalue(),
                "export_timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
            }, indent=2)

        elif format == "markdown":
            # Build a human-readable markdown report from the dashboard.
            dash = metrics_data.get("dashboard", {})
            md = f"""# AbletonMCP-AI System Report
Generated: {time.strftime("%Y-%m-%d %H:%M:%S")}

## System Health
- Status: {dash.get("system_health", "unknown")}
- Health Score: {dash.get("system_health_score", 0)}/100

## Generations
- Total Rated: {dash.get("generations", {}).get("total_rated", 0)}
- Average Score: {dash.get("generations", {}).get("average_score", 0)}/5

## Library Coverage
- Folders Used: {dash.get("coverage", {}).get("used_folders", 0)}/{dash.get("coverage", {}).get("total_folders", 0)}
- Coverage: {dash.get("coverage", {}).get("percentage", 0)}%

## Current Session
- Samples in Fatigue: {dash.get("library", {}).get("samples_in_fatigue", 0)}
- Families Used: {dash.get("library", {}).get("families_used_session", 0)}
"""
            return json.dumps({
                "status": "success",
                "format": "markdown",
                "report": md
            }, indent=2)

        else:
            return json.dumps({"error": f"Unsupported format: {format}"}, indent=2)

    except Exception as e:
        return json.dumps({"error": str(e)}, indent=2)
|
|
|
|
|
|
# ============================================================================
|
|
# MAIN
|
|
# ============================================================================
|
|
|
|
def main():
    """Main entry point: parse CLI args and start the MCP server."""
    import argparse

    parser = argparse.ArgumentParser(description="AbletonMCP-AI Server")
    parser.add_argument("--port", type=int, default=0, help="Puerto para el servidor MCP (0 = auto)")
    parser.add_argument("--transport", type=str, default="stdio", choices=["stdio", "sse"], help="Transporte MCP")
    args = parser.parse_args()

    # Startup banner.
    divider = "=" * 60
    print(divider)
    print("AbletonMCP-AI Server")
    print(divider)
    print(f"Transporte: {args.transport}")
    print(f"Conectando a Ableton en: {HOST}:{DEFAULT_PORT}")
    print("-" * 60)

    # Hand control over to the MCP server loop.
    mcp.run(transport=args.transport)
|
|
|
|
|
|
# Script entry point (e.g. `python -m AbletonMCP_AI.MCP_Server.server`).
if __name__ == "__main__":
    main()
|