Files
AbletonMCP_AI/AbletonMCP_AI/mcp_server/server.py

7200 lines
300 KiB
Python
Raw Blame History

This file contains invisible Unicode characters
This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""
AbletonMCP_AI MCP Server - Clean FastMCP server for Ableton Live 12.
Communicates with the Ableton Remote Script via TCP socket on port 9877.
"""
import json
import logging
import os
import socket
import sys
import time
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Optional
from mcp.server.fastmcp import FastMCP, Context
# ------------------------------------------------------------------
# Paths
# ------------------------------------------------------------------
# Resolve once, then derive every directory from the same anchor.
_THIS_FILE = Path(__file__).resolve()
MCP_DIR = _THIS_FILE.parent               # directory holding this server module
PROJECT_DIR = MCP_DIR.parent              # AbletonMCP_AI
BASE_DIR = PROJECT_DIR.parent             # MIDI Remote Scripts root
ENGINE_DIR = MCP_DIR / "engines"
# Add engine dir to path so we can import them
for p in (str(ENGINE_DIR), str(MCP_DIR), str(PROJECT_DIR)):
    if p not in sys.path:
        sys.path.insert(0, p)
# ------------------------------------------------------------------
# Logging
# ------------------------------------------------------------------
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(name)s] %(levelname)s: %(message)s")
logger = logging.getLogger("AbletonMCP-AI")
# ------------------------------------------------------------------
# Ableton TCP connection
# ------------------------------------------------------------------
# The Ableton Remote Script listens on localhost only; every tool in this
# module talks to it through _send_to_ableton().
ABLETON_HOST = "127.0.0.1"
ABLETON_PORT = 9877
# Messages are newline-delimited JSON; this terminator marks end-of-reply.
TERMINATOR = b"\n"
# Tool timeouts (seconds), keyed by the TCP command type sent to Ableton.
# NOTE: a duplicate "create_arrangement_audio_clip" entry (20.0, Sprint 2)
# was removed; the Phase 2 value (30.0) was the one that took effect anyway
# because later dict-literal keys overwrite earlier ones.
TIMEOUTS = {
    "get_session_info": 5.0,
    "get_tracks": 5.0,
    "get_scenes": 5.0,
    "get_master_info": 5.0,
    "set_tempo": 10.0,
    "start_playback": 10.0,
    "stop_playback": 10.0,
    "toggle_playback": 10.0,
    "stop_all_clips": 10.0,
    "clear_project": 30.0,
    "create_midi_track": 15.0,
    "create_audio_track": 15.0,
    "set_track_name": 10.0,
    "set_track_volume": 10.0,
    "set_track_pan": 10.0,
    "set_track_mute": 10.0,
    "set_track_solo": 10.0,
    "set_master_volume": 10.0,
    "create_clip": 15.0,
    "add_notes_to_clip": 15.0,
    "fire_clip": 10.0,
    "fire_scene": 10.0,
    "set_scene_name": 10.0,
    "create_scene": 15.0,
    "set_metronome": 10.0,
    "set_loop": 10.0,
    "set_signature": 10.0,
    "create_arrangement_audio_pattern": 30.0,
    "load_sample_to_drum_rack": 30.0,
    "generate_track": 300.0,
    "generate_song": 300.0,
    "select_samples_for_genre": 30.0,
    # Sprint 2 - Phase 1 & 2: Advanced Production Tools
    "generate_complete_reggaeton": 60.0,
    "generate_from_reference": 60.0,
    "load_sample_to_clip": 15.0,
    "duplicate_clip": 15.0,
    "set_warp_markers": 15.0,
    "reverse_clip": 10.0,
    "pitch_shift_clip": 15.0,
    "time_stretch_clip": 15.0,
    "slice_clip": 20.0,
    # Fase 3: Mixing & Effects
    "create_bus_track": 15.0,
    "route_track_to_bus": 10.0,
    "create_return_track": 15.0,
    "set_track_send": 10.0,
    "insert_device": 15.0,
    "configure_eq": 15.0,
    "configure_compressor": 15.0,
    "setup_sidechain": 15.0,
    "auto_gain_staging": 20.0,
    "analyze_levels": 15.0,
    "apply_master_chain": 20.0,
    # Fase 4: Workflow & Export
    "export_project": 60.0,
    "get_project_summary": 10.0,
    "suggest_improvements": 15.0,
    "validate_project": 15.0,
    "humanize_track": 15.0,
    # Phase 1 & 2 - Bridge Engines to Ableton (T001-T040)
    "produce_reggaeton": 300.0,
    "produce_from_reference": 300.0,
    "produce_arrangement": 300.0,
    "complete_production": 300.0,
    "batch_produce": 600.0,
    "generate_midi_clip": 30.0,
    "generate_dembow_clip": 30.0,
    "generate_bass_clip": 30.0,
    "generate_chords_clip": 30.0,
    "generate_melody_clip": 30.0,
    "create_drum_kit": 30.0,
    "build_track_from_samples": 60.0,
    "generate_track_from_config": 120.0,
    "generate_section": 60.0,
    "apply_human_feel": 30.0,
    "add_percussion_fills": 30.0,
    # Phase 2 - Arrangement & Automation
    "build_arrangement_structure": 60.0,
    "create_arrangement_midi_clip": 30.0,
    "create_arrangement_audio_clip": 30.0,
    "fill_arrangement_with_song": 300.0,
    "automate_filter": 30.0,
    "create_fx_automation": 30.0,
    # Musical intelligence / workflow / quality
    "analyze_project_key": 20.0,
    "harmonize_track": 30.0,
    "generate_counter_melody": 30.0,
    "detect_energy_curve": 20.0,
    "balance_sections": 20.0,
    "variate_loop": 30.0,
    "add_call_and_response": 30.0,
    "generate_breakdown": 30.0,
    "generate_drop_variation": 30.0,
    "create_outro": 30.0,
    "render_stems": 120.0,
    "render_full_mix": 120.0,
    "render_instrumental": 120.0,
    "full_quality_check": 30.0,
    "fix_quality_issues": 60.0,
    "duplicate_project": 30.0,
    # Intelligent Track Generation (T200+)
    "generate_intelligent_track": 300.0,
    "generate_expansive_track": 600.0,
    "create_radio_edit": 60.0,
    "create_dj_edit": 60.0,
    "undo": 10.0,
    "redo": 10.0,
    "save_checkpoint": 20.0,
    "health_check": 10.0,
    # Agente 3: Transitions & Fills
    "create_fx_hit": 30.0,
    "create_transition_fill": 30.0,
    "create_intro_buildup": 30.0,
    # Agente 4: White Noise Generator
    "create_white_noise": 30.0,
    # Agente 5: Multi-Parameter Automation
    "add_parameter_automation": 30.0,
    # Agente 8: Parallel Compression System
    "create_parallel_compression": 30.0,
    # Agente 13: Extended Chords Engine
    "generate_advanced_chords": 30.0,
    # Agente 14: Professional Melody Engine (motivic)
    "generate_motivic_melody": 30.0,
    # Agente 15: Reggaeton Rhythm Patterns Library
    "get_rhythmic_pattern": 15.0,
    # Agente 18: Professional Workflow Orchestrator
    "produce_professional_track": 600.0,
    # Agente 12: VST/AU Plugin Support
    "load_vst_plugin": 30.0,
    "configure_vst_parameter": 15.0,
    "scan_vst_plugins": 30.0,
    "get_vst_presets": 15.0,
    # Agente 19: Quality Assurance Suite
    "validate_project_qa": 15.0,
    "suggest_improvements_qa": 15.0,
    # Sprint 5: DJ Professional Track
    "generate_dj_professional_track": 600.0,
    # Sprint 5.5: Advanced Production Tools
    "inject_sample_batch": 10.0,
    "validate_coherence": 15.0,
    "build_section_real": 15.0,
    "select_coherent_kit": 20.0,
    "produce_radio_edit_4min": 600.0,
    "get_production_progress": 5.0,
    # BPM Analyzer Integration
    "analyze_all_bpm": 600.0,  # 10 minutes for analyzing 800+ samples
    "select_bpm_coherent_pool": 20.0,
    "warp_clip_to_bpm": 30.0,
    # Spectral Coherence Production
    "produce_with_spectral_coherence": 300.0,
}
def _send_to_ableton(cmd_type: str, params: Optional[dict] = None, timeout: float = 15.0) -> dict:
    """Send a command to the Ableton Remote Script and return the response.

    Args:
        cmd_type: Command type string understood by the Remote Script.
        params: Command parameters; None is treated as {}.
        timeout: Socket connect/read timeout in seconds.

    Returns:
        The decoded JSON reply on success, or an error envelope
        ({"status": "error", "message": ...}) — this function never raises.
    """
    try:
        # socket objects are context managers; this guarantees close() on
        # every path (replaces the previous manual try/finally cleanup).
        with socket.create_connection((ABLETON_HOST, ABLETON_PORT), timeout=timeout) as sock:
            sock.settimeout(timeout)
            msg = json.dumps({"type": cmd_type, "params": params or {}}) + "\n"
            sock.sendall(msg.encode("utf-8"))
            buf = b""
            # Accumulate chunks until the newline terminator arrives or the
            # peer closes the connection.
            while True:
                chunk = sock.recv(65536)
                if not chunk:
                    break
                buf += chunk
                if TERMINATOR in buf:
                    raw, _, _ = buf.partition(TERMINATOR)
                    return json.loads(raw.decode("utf-8"))
            return {"status": "error", "message": "No response terminator received"}
    except socket.timeout:
        return {"status": "error", "message": f"Command '{cmd_type}' timed out after {timeout}s"}
    except ConnectionRefusedError:
        return {"status": "error", "message": f"Cannot connect to Ableton on {ABLETON_HOST}:{ABLETON_PORT}. Is the Remote Script loaded?"}
    except Exception as e:
        return {"status": "error", "message": str(e)}
def _ok(data: dict) -> str:
return json.dumps({"status": "success", "result": data}, indent=2)
def _err(msg: str) -> str:
return json.dumps({"status": "error", "message": msg}, indent=2)
def _ableton_result(resp: dict) -> dict:
"""Return the nested Ableton payload when present."""
result = resp.get("result", {})
return result if isinstance(result, dict) else {}
def _proxy_ableton_command(cmd_type: str, params: dict = None, timeout: Optional[float] = None,
                           defaults: dict = None) -> str:
    """Run one TCP command against Ableton and wrap its nested result payload.

    Falls back to the per-command TIMEOUTS entry (or 15s) when no explicit
    timeout is given; `defaults` seeds the payload and is overridden by
    whatever the Remote Script actually returned.
    """
    effective_timeout = timeout or TIMEOUTS.get(cmd_type, 15.0)
    resp = _send_to_ableton(cmd_type, params or {}, timeout=effective_timeout)
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))
    payload = {**(defaults or {}), **_ableton_result(resp)}
    return _ok(payload)
def _warm_engine_imports() -> None:
    """Preload heavy engine modules before the first MCP tool call.

    FastMCP handles tool calls on the request path. Some lazy imports work
    fine in direct Python calls but stall badly inside a live stdio
    CallToolRequest. Importing and instantiating the heavy workflow classes
    here keeps that cost off the request path and avoids false MCP timeouts.
    Failures are logged and swallowed so startup never aborts.
    """
    targets = (
        ("engines.production_workflow", "ProductionWorkflow"),
        ("engines.workflow_engine", "WorkflowEngine"),
        ("engines.musical_intelligence", "MusicalIntelligenceEngine"),
    )
    for module_name, class_name in targets:
        try:
            module = __import__(module_name, fromlist=[class_name])
            getattr(module, class_name)()  # instantiate once to force full init
            logger.info("Warm preload ready: %s", class_name)
        except Exception:
            logger.exception("Warm preload failed: %s", class_name)
# ------------------------------------------------------------------
# Lifespan / startup
# ------------------------------------------------------------------
@asynccontextmanager
async def server_lifespan(server: FastMCP):
    """FastMCP lifespan hook: warm engines, probe Ableton, then serve.

    The Ableton probe is best-effort: a short-timeout get_session_info is
    attempted and any failure is only logged, so server startup is never
    blocked by a missing Remote Script.
    """
    logger.info("AbletonMCP-AI Server starting...")
    _warm_engine_imports()
    # Non-blocking: try to connect to Ableton but don't block startup if unavailable
    try:
        sock = socket.create_connection((ABLETON_HOST, ABLETON_PORT), timeout=2.0)
        sock.settimeout(2.0)
        msg = json.dumps({"type": "get_session_info", "params": {}}) + "\n"
        sock.sendall(msg.encode("utf-8"))
        buf = b""
        sock.settimeout(3.0)  # slightly longer deadline for reading the reply
        try:
            # Accumulate until the newline terminator (or the peer closes).
            while TERMINATOR not in buf:
                chunk = sock.recv(4096)
                if not chunk:
                    break
                buf += chunk
            if TERMINATOR in buf:
                raw = buf.split(TERMINATOR)[0]
                info = json.loads(raw.decode("utf-8"))
                r = info.get("result", {})
                logger.info("Connected to Ableton Live: %d BPM, %d tracks",
                            r.get("tempo", 0), r.get("num_tracks", 0))
        except Exception:
            logger.warning("Ableton connected but session info unavailable")
        sock.close()
    except ConnectionRefusedError:
        logger.warning("Ableton Live not reachable on %s:%d. Load AbletonMCP_AI as Control Surface.", ABLETON_HOST, ABLETON_PORT)
    except Exception as e:
        logger.warning("Ableton connection check failed: %s", str(e))
    yield  # server handles requests while suspended here
    logger.info("AbletonMCP-AI Server shutting down")
# Module-level server instance; every @mcp.tool() below registers against it.
mcp = FastMCP("Ableton Live MCP", lifespan=server_lifespan)
# ==================================================================
# DEBUG - No dependencies, always works
# ==================================================================
@mcp.tool()
def ping(ctx: Context) -> str:
    """Simple ping test. Use this to verify MCP connectivity without needing Ableton."""
    manager = getattr(mcp, "_tool_manager", None)
    registered = getattr(manager, "_tools", {})
    return json.dumps({"status": "ok", "message": "pong", "tools": len(registered)})
# ==================================================================
# INFO TOOLS
# ==================================================================
@mcp.tool()
def get_session_info(ctx: Context) -> str:
    """Get current Ableton Live session information."""
    reply = _send_to_ableton("get_session_info", timeout=TIMEOUTS["get_session_info"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Unknown error"))
    r = reply["result"]
    # Project only the fields the tool contract exposes.
    fields = ("tempo", "num_tracks", "num_scenes", "is_playing",
              "current_song_time", "metronome", "master_volume")
    return _ok({name: r.get(name) for name in fields})
@mcp.tool()
def get_tracks(ctx: Context) -> str:
    """Get list of all tracks in the current project."""
    reply = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Unknown error"))
    return _ok(reply.get("result", {}))
@mcp.tool()
def get_scenes(ctx: Context) -> str:
    """Get list of all scenes."""
    reply = _send_to_ableton("get_scenes", timeout=TIMEOUTS["get_scenes"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Unknown error"))
    return _ok(reply.get("result", {}))
@mcp.tool()
def get_arrangement_clips(ctx: Context, track_index: Optional[int] = None) -> str:
    """Read all clips currently placed in Arrangement View.
    Use this to understand the current song structure — which clips exist,
    where they start, how long they are, and which tracks they're on.
    Essential for understanding a project before modifying it.
    Args:
        track_index: Optional. If provided, only returns clips for that track.
            If omitted, returns clips for all tracks.
    Returns:
        - clips: list with track_index, track_name, name, start_time (beats),
          end_time, length, is_midi, color, muted, looping
        - total_clips: total count
        - arrangement_length_beats: total song length in beats
        - unique_start_positions: sorted list of clip start points (bar map)
    """
    # Only forward the filter when given; the Remote Script treats a missing
    # key as "all tracks".
    params = {}
    if track_index is not None:
        params["track_index"] = track_index
    return _proxy_ableton_command("get_arrangement_clips", params, timeout=30.0)
@mcp.tool()
def get_master_info(ctx: Context) -> str:
    """Get master track information."""
    reply = _send_to_ableton("get_master_info", timeout=TIMEOUTS["get_master_info"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Unknown error"))
    return _ok(reply.get("result", {}))
@mcp.tool()
def health_check(ctx: Context) -> str:
    """T050: Run a comprehensive health check of the AbletonMCP_AI system.
    Runs 5 checks:
    1. TCP server connection
    2. Song accessibility
    3. Tracks accessibility
    4. Browser accessibility
    5. update_display drain loop active
    Returns a score 0-5 with detailed status for each check.
    This should be the first command run after opening Ableton.
    """
    reply = _send_to_ableton("health_check", timeout=TIMEOUTS["health_check"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Unknown error"))
    r = reply.get("result", {})
    # One human-readable line per check.
    check_summary = [
        " [%s] %s: %s" % ("OK" if c.get("passed") else "FAIL",
                          c.get("name", "?"), c.get("detail", ""))
        for c in r.get("checks", [])
    ]
    return _ok({
        "score": "%d/5" % r.get("score", 0),
        "status": r.get("status", "UNKNOWN"),
        "checks": check_summary,
        "recommendation": r.get("recommendation", ""),
    })
# ==================================================================
# TRANSPORT
# ==================================================================
@mcp.tool()
def start_playback(ctx: Context) -> str:
    """Start playback."""
    reply = _send_to_ableton("start_playback", timeout=TIMEOUTS["start_playback"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def stop_playback(ctx: Context) -> str:
    """Stop playback."""
    reply = _send_to_ableton("stop_playback", timeout=TIMEOUTS["stop_playback"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def toggle_playback(ctx: Context) -> str:
    """Toggle playback (start if stopped, stop if playing)."""
    reply = _send_to_ableton("toggle_playback", timeout=TIMEOUTS["toggle_playback"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def stop_all_clips(ctx: Context) -> str:
    """Stop all clips in Session View."""
    reply = _send_to_ableton("stop_all_clips", timeout=TIMEOUTS["stop_all_clips"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def clear_project(ctx: Context) -> str:
    """Clear entire project - delete all tracks and clips. Useful for starting fresh.
    Returns:
        Confirmation message with number of tracks deleted.
    """
    reply = _send_to_ableton("clear_project", timeout=TIMEOUTS["clear_project"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Failed to clear project"))
    deleted = reply.get("result", {}).get("tracks_deleted", 0)
    return _ok("Project cleared. %d tracks deleted. Ready for new production." % deleted)
# ==================================================================
# PROJECT SETTINGS
# ==================================================================
@mcp.tool()
def set_tempo(ctx: Context, tempo: float) -> str:
    """Set the project tempo in BPM."""
    # Validate locally before touching the wire.
    if not 20 <= tempo <= 300:
        return _err(f"Invalid tempo: {tempo}. Must be 20-300 BPM.")
    reply = _send_to_ableton("set_tempo", {"tempo": tempo}, timeout=TIMEOUTS["set_tempo"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_time_signature(ctx: Context, numerator: int = 4, denominator: int = 4) -> str:
    """Set the project time signature."""
    payload = {"numerator": numerator, "denominator": denominator}
    reply = _send_to_ableton("set_signature", payload, timeout=TIMEOUTS["set_signature"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_metronome(ctx: Context, enabled: bool) -> str:
    """Enable or disable metronome."""
    reply = _send_to_ableton("set_metronome", {"enabled": enabled}, timeout=TIMEOUTS["set_metronome"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
# ==================================================================
# TRACKS
# ==================================================================
@mcp.tool()
def create_midi_track(ctx: Context, index: int = -1) -> str:
    """Create a new MIDI track. index=-1 appends at the end."""
    reply = _send_to_ableton("create_midi_track", {"index": index}, timeout=TIMEOUTS["create_midi_track"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def create_audio_track(ctx: Context, index: int = -1) -> str:
    """Create a new audio track. index=-1 appends at the end."""
    reply = _send_to_ableton("create_audio_track", {"index": index}, timeout=TIMEOUTS["create_audio_track"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_track_name(ctx: Context, track_index: int, name: str) -> str:
    """Set the name of a track."""
    payload = {"track_index": track_index, "name": name}
    reply = _send_to_ableton("set_track_name", payload, timeout=TIMEOUTS["set_track_name"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_track_volume(ctx: Context, track_index: int, volume: float) -> str:
    """Set track volume (0.0 - 1.0)."""
    if not 0.0 <= volume <= 1.0:
        return _err(f"Invalid volume: {volume}. Must be 0.0-1.0.")
    payload = {"track_index": track_index, "volume": volume}
    reply = _send_to_ableton("set_track_volume", payload, timeout=TIMEOUTS["set_track_volume"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_track_pan(ctx: Context, track_index: int, pan: float) -> str:
    """Set track pan (-1.0 left to 1.0 right)."""
    if not -1.0 <= pan <= 1.0:
        return _err(f"Invalid pan: {pan}. Must be -1.0 to 1.0.")
    payload = {"track_index": track_index, "pan": pan}
    reply = _send_to_ableton("set_track_pan", payload, timeout=TIMEOUTS["set_track_pan"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_track_mute(ctx: Context, track_index: int, mute: bool) -> str:
    """Mute or unmute a track."""
    payload = {"track_index": track_index, "mute": mute}
    reply = _send_to_ableton("set_track_mute", payload, timeout=TIMEOUTS["set_track_mute"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_track_solo(ctx: Context, track_index: int, solo: bool) -> str:
    """Solo or unsolo a track."""
    payload = {"track_index": track_index, "solo": solo}
    reply = _send_to_ableton("set_track_solo", payload, timeout=TIMEOUTS["set_track_solo"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_master_volume(ctx: Context, volume: float) -> str:
    """Set master track volume (0.0 - 1.0)."""
    if not 0.0 <= volume <= 1.0:
        return _err(f"Invalid volume: {volume}. Must be 0.0-1.0.")
    reply = _send_to_ableton("set_master_volume", {"volume": volume}, timeout=TIMEOUTS["set_master_volume"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
# ==================================================================
# CLIPS & SESSION VIEW
# ==================================================================
@mcp.tool()
def create_clip(ctx: Context, track_index: int, clip_index: int = 0, length: float = 4.0) -> str:
    """Create a MIDI clip in Session View."""
    payload = {"track_index": track_index, "clip_index": clip_index, "length": length}
    reply = _send_to_ableton("create_clip", payload, timeout=TIMEOUTS["create_clip"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def add_notes_to_clip(ctx: Context, track_index: int, clip_index: int, notes: list) -> str:
    """Add MIDI notes to a clip. notes is a list of dicts with keys: pitch, start_time, duration, velocity."""
    payload = {"track_index": track_index, "clip_index": clip_index, "notes": notes}
    reply = _send_to_ableton("add_notes_to_clip", payload, timeout=TIMEOUTS["add_notes_to_clip"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def fire_clip(ctx: Context, track_index: int, clip_index: int = 0) -> str:
    """Fire a clip in Session View."""
    payload = {"track_index": track_index, "clip_index": clip_index}
    reply = _send_to_ableton("fire_clip", payload, timeout=TIMEOUTS["fire_clip"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def fire_scene(ctx: Context, scene_index: int) -> str:
    """Fire a scene in Session View."""
    reply = _send_to_ableton("fire_scene", {"scene_index": scene_index}, timeout=TIMEOUTS["fire_scene"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def set_scene_name(ctx: Context, scene_index: int, name: str) -> str:
    """Set the name of a scene."""
    payload = {"scene_index": scene_index, "name": name}
    reply = _send_to_ableton("set_scene_name", payload, timeout=TIMEOUTS["set_scene_name"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def create_scene(ctx: Context, index: int = -1) -> str:
    """Create a new scene."""
    reply = _send_to_ableton("create_scene", {"index": index}, timeout=TIMEOUTS["create_scene"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
# ==================================================================
# ARRANGEMENT VIEW - Audio clips
# ==================================================================
@mcp.tool()
def create_arrangement_audio_pattern(ctx: Context, track_index: int, file_path: str,
                                     positions: list = None, name: str = "") -> str:
    """Create audio clips in Arrangement View from a .wav file."""
    payload = {
        "track_index": track_index,
        "file_path": file_path,
        # Default to a single clip at beat 0 when no positions are given.
        "positions": positions if positions is not None else [0],
        "name": name,
    }
    reply = _send_to_ableton("create_arrangement_audio_pattern", payload,
                             timeout=TIMEOUTS["create_arrangement_audio_pattern"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
# ==================================================================
# GENERATION & SAMPLE SELECTION
# ==================================================================
@mcp.tool()
def generate_track(ctx: Context, genre: str, style: str = "", bpm: float = 0,
                   key: str = "", structure: str = "standard") -> str:
    """Generate a track using AI."""
    payload = {"genre": genre, "style": style, "bpm": bpm, "key": key, "structure": structure}
    reply = _send_to_ableton("generate_track", payload, timeout=TIMEOUTS["generate_track"])
    if reply.get("status") == "success":
        return _ok(reply)
    return _err(reply.get("message"))
@mcp.tool()
def generate_song(ctx: Context, genre: str, style: str = "", bpm: float = 0,
                  key: str = "", structure: str = "standard") -> str:
    """Generate a complete song.

    NOTE(review): this sends the "generate_track" command type (not
    "generate_song") while using the generate_song timeout — presumably an
    intentional alias to the same Remote Script handler, but confirm; a
    dedicated "generate_song" handler would never be reached from here.
    """
    resp = _send_to_ableton("generate_track",
                            {"genre": genre, "style": style, "bpm": bpm, "key": key, "structure": structure},
                            timeout=TIMEOUTS["generate_song"])
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message"))
@mcp.tool()
def select_samples_for_genre(ctx: Context, genre: str, key: str = "", bpm: float = 0) -> str:
    """Select samples for a genre from the local library.

    Args:
        genre: Genre name understood by the sample selector.
        key: Optional musical key filter ("" means any key).
        bpm: Optional tempo filter (0 means any tempo).

    Returns:
        JSON with the chosen drum kit slots plus up to 5 bass, 5 synth and
        3 fx sample names.
    """
    try:
        # Lazy import keeps server import light; the previously imported
        # SampleSelector class was unused, only get_selector is needed.
        from engines.sample_selector import get_selector
        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available. Check libreria/reggaeton path.")
        group = selector.select_for_genre(genre, key if key else None, bpm if bpm > 0 else None)
        # Report only the drum slots the kit actually filled.
        kit = group.drums
        drums = {}
        for slot in ("kick", "snare", "clap", "hat_closed", "hat_open"):
            sample = getattr(kit, slot)
            if sample:
                drums[slot] = sample.name
        return _ok({
            "genre": group.genre,
            "key": group.key,
            "bpm": group.bpm,
            "drums": drums,
            "bass": [s.name for s in (group.bass or [])[:5]],
            "synths": [s.name for s in (group.synths or [])[:5]],
            "fx": [s.name for s in (group.fx or [])[:3]],
        })
    except ImportError:
        return _err("Sample selector engine not available.")
    except Exception as e:
        return _err(f"Error selecting samples: {str(e)}")
# ==================================================================
# LIBRARY ANALYSIS TOOLS (Sprint 1 Integration)
# ==================================================================
# Absolute path of the reggaeton sample library bundled with the Remote Scripts.
REGGAETON_LIB = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton"
# Cache for expensive engine instances, lazily created by the _get_* helpers below.
_analyzer_cache = None   # LibreriaAnalyzer, see _get_analyzer()
_embedding_cache = None  # EmbeddingEngine, see _get_embedding_engine()
_matcher_cache = None    # ReferenceMatcher, see _get_matcher()
def _get_analyzer():
    """Return the shared LibreriaAnalyzer instance, building it on first use."""
    global _analyzer_cache
    if _analyzer_cache is not None:
        return _analyzer_cache
    logger.info("Initializing LibreriaAnalyzer cache")
    from engines.libreria_analyzer import LibreriaAnalyzer
    _analyzer_cache = LibreriaAnalyzer(REGGAETON_LIB, verbose=False)
    logger.info("LibreriaAnalyzer cache ready")
    return _analyzer_cache
def _get_embedding_engine():
    """Return the shared EmbeddingEngine instance, building it on first use."""
    global _embedding_cache
    if _embedding_cache is not None:
        return _embedding_cache
    from engines.embedding_engine import EmbeddingEngine
    _embedding_cache = EmbeddingEngine()
    return _embedding_cache
def _get_matcher():
    """Lazy-load the ReferenceMatcher with caching.

    Uses the bundled reference mix when present on disk; otherwise the
    matcher is constructed without a reference file.
    """
    global _matcher_cache
    if _matcher_cache is None:
        from engines.reference_matcher import ReferenceMatcher
        # os.path.join instead of manual backslash concatenation.
        ref_path = os.path.join(REGGAETON_LIB, "reggaeton_ejemplo.mp3")
        _matcher_cache = ReferenceMatcher(reference_path=ref_path if os.path.isfile(ref_path) else None)
    return _matcher_cache
@mcp.tool()
def analyze_library(ctx: Context, force_reanalyze: bool = False) -> str:
    """Analyze all samples in the reggaeton library. Extracts BPM, Key, MFCCs, etc."""
    try:
        analyzer = _get_analyzer()
        features = analyzer.analyze_all(force_reanalyze=force_reanalyze)
        return _ok({
            "total_analyzed": len(features),
            "cache_file": str(analyzer.cache_path),
        })
    except Exception as e:
        return _err(f"Error analyzing library: {str(e)}")
@mcp.tool()
def get_library_stats(ctx: Context) -> str:
    """Get statistics about the analyzed library.

    Falls back to a raw file count grouped by top-level folder when no
    spectral-analysis cache exists yet, so the call stays fast.
    """
    try:
        logger.info("get_library_stats: start")
        analyzer = _get_analyzer()
        # Try to load cache from disk first (fast)
        if not analyzer.features:
            analyzer._load_cache()
        # If still no features, return basic file count without full analysis
        if not analyzer.features:
            import glob as _glob
            audio_files = _glob.glob(os.path.join(REGGAETON_LIB, "**", "*.wav"), recursive=True)
            audio_files += _glob.glob(os.path.join(REGGAETON_LIB, "**", "*.mp3"), recursive=True)
            audio_files += _glob.glob(os.path.join(REGGAETON_LIB, "**", "*.aif"), recursive=True)
            audio_files += _glob.glob(os.path.join(REGGAETON_LIB, "**", "*.flac"), recursive=True)
            # Count by folder (role)
            roles = {}
            for f in audio_files:
                # NOTE(review): assumes each file sits at least one folder below
                # REGGAETON_LIB; a file directly in the root would report its own
                # filename as the "role" — confirm this is acceptable.
                parts = f.replace(REGGAETON_LIB, "").split(os.sep)
                role = parts[1] if len(parts) > 1 else "unknown"
                roles[role] = roles.get(role, 0) + 1
            return _ok({
                "total_files_found": len(audio_files),
                "files_by_role": roles,
                "note": "Full spectral analysis not yet performed. Call analyze_library first.",
            })
        stats = analyzer.get_stats()
        logger.info("get_library_stats: done")
        return _ok(stats)
    except Exception as e:
        logger.exception("get_library_stats: failed")
        return _err(f"Error getting library stats: {str(e)}")
@mcp.tool()
def get_similar_samples(ctx: Context, sample_path: str, top_n: int = 10) -> str:
    """Find samples similar to a given sample using embeddings."""
    try:
        engine = _get_embedding_engine()
        matches = engine.find_similar(sample_path, top_n=top_n)
        return _ok({"reference": sample_path, "similar": matches})
    except Exception as e:
        return _err(f"Error finding similar samples: {str(e)}")
@mcp.tool()
def find_samples_like_audio(ctx: Context, audio_path: str, top_n: int = 20, role: str = "") -> str:
    """Find samples similar to an external audio file (e.g., reggaeton_ejemplo.mp3)."""
    try:
        engine = _get_embedding_engine()
        matches = engine.find_by_reference(audio_path, top_n=top_n)
        if role:
            # Role filter is applied after retrieval, so fewer than top_n may remain.
            matches = [m for m in matches if m.get("role", "") == role][:top_n]
        return _ok({"reference": audio_path, "similar": matches})
    except Exception as e:
        return _err(f"Error finding samples like audio: {str(e)}")
@mcp.tool()
def get_user_sound_profile(ctx: Context) -> str:
    """Get the user's sound profile based on reggaeton_ejemplo.mp3."""
    try:
        return _ok(_get_matcher().get_user_profile())
    except Exception as e:
        return _err(f"Error getting user profile: {str(e)}")
@mcp.tool()
def get_recommended_samples(ctx: Context, role: str = "", count: int = 5) -> str:
    """Get recommended samples for a role based on user's sound profile."""
    try:
        from engines.reference_matcher import get_recommended_samples as _rec
        samples = _rec(role if role else None, count)
        return _ok({"role": role or "all", "samples": samples})
    except Exception as e:
        return _err(f"Error getting recommended samples: {str(e)}")
@mcp.tool()
def compare_two_samples(ctx: Context, path1: str, path2: str) -> str:
    """Compare two samples and return similarity score and feature differences."""
    try:
        engine = _get_embedding_engine()
        emb_a = engine.get_embedding(path1)
        emb_b = engine.get_embedding(path2)
        if emb_a is None or emb_b is None:
            return _err("One or both samples not found in embeddings index")
        from engines.embedding_engine import cosine_similarity
        score = cosine_similarity(emb_a, emb_b)
        # Feature details are best-effort: only when the engine exposes an analyzer.
        has_analyzer = hasattr(engine, 'analyzer')
        feats_a = engine.analyzer.get_features(path1) if has_analyzer else {}
        feats_b = engine.analyzer.get_features(path2) if has_analyzer else {}
        return _ok({
            "similarity": float(score),
            "sample1": {"path": path1, "features": feats_a},
            "sample2": {"path": path2, "features": feats_b},
        })
    except Exception as e:
        return _err(f"Error comparing samples: {str(e)}")
@mcp.tool()
def browse_library(ctx: Context, pack: str = "", role: str = "", bpm_min: float = 0, bpm_max: float = 0, key: str = "") -> str:
    """Browse the library with filters for pack, role, BPM range, and key."""
    try:
        analyzer = _get_analyzer()
        if not analyzer.features:
            analyzer.analyze_all()

        def _matches(feats: dict) -> bool:
            # Substring match for pack/key, exact case-insensitive match for role;
            # a 0 bound disables that side of the BPM range.
            if pack and pack.lower() not in feats.get("pack", "").lower():
                return False
            if role and role.lower() != feats.get("role", "").lower():
                return False
            if key and key.lower() not in feats.get("key", "").lower():
                return False
            bpm = feats.get("bpm", 0)
            if bpm_min > 0 and bpm < bpm_min:
                return False
            if bpm_max > 0 and bpm > bpm_max:
                return False
            return True

        hits = [{"path": path, **feats}
                for path, feats in analyzer.features.items() if _matches(feats)]
        return _ok({"total": len(hits), "samples": hits[:50]})
    except Exception as e:
        return _err(f"Error browsing library: {str(e)}")
# ==================================================================
# BPM ANALYZER INTEGRATION (T090-T094)
# ==================================================================
@mcp.tool()
def analyze_all_bpm(ctx: Context, force_reanalyze: bool = False) -> str:
    """Analyze BPM of all samples in the reggaeton library using librosa.

    Extracts BPM, confidence scores and spectral embeddings for the whole
    library and stores them in the SQLite metadata store.

    Args:
        force_reanalyze: Reanalyze all samples even if already in database

    Returns:
        JSON with counts, progress, elapsed time, a 5-sample preview and up
        to 3 reported errors.

    Note:
        This operation takes approximately 30 minutes for 800 samples.
    """
    reply = _send_to_ableton("analyze_all_bpm", {"force_reanalyze": force_reanalyze},
                             timeout=TIMEOUTS["analyze_all_bpm"])
    if reply.get("status") != "success":
        return _err(reply.get("message", "Unknown error during BPM analysis"))
    r = reply.get("result", {})
    errors = r.get("errors")
    return _ok({
        "analyzed": r.get("analyzed", 0),
        "total": r.get("total", 0),
        "progress": r.get("progress", "0%"),
        "elapsed_minutes": r.get("elapsed_minutes", 0),
        "library_path": r.get("library_path", ""),
        "sample_preview": r.get("sample_results", [])[:5],
        "errors": errors[:3] if errors else None,
        "note": "Full results stored in metadata store. Use browse_library or get_library_stats to query."
    })
@mcp.tool()
def select_bpm_coherent_pool(ctx: Context, target_bpm: float = 95, tolerance: float = 5, pool_size: int = 20) -> str:
    """Select samples that match target BPM within tolerance.

    Uses librosa-analyzed BPM data from the metadata store to find
    samples that will work well together at a specific tempo.

    Args:
        target_bpm: Target tempo to match (default 95)
        tolerance: BPM tolerance (default ±5)
        pool_size: Number of samples to return (default 20)

    Returns:
        JSON with selected samples and coherence scores.
    """
    try:
        from engines.metadata_store import SampleMetadataStore
        # `os` is imported at module level; no local import needed.
        db_path = os.path.join(os.path.dirname(__file__), "..", "..", "libreria", "metadata.db")
        store = SampleMetadataStore(db_path)
        store.init_database()
        try:
            pool = store.get_coherent_pool(target_bpm, tolerance=tolerance)
            results = []
            for path in pool[:pool_size]:
                features = store.get_sample_features(path)
                if features:
                    results.append({
                        "path": path,
                        "bpm": features.bpm,
                        "key": features.key,
                        "category": features.categories[0] if features.categories else "unknown"
                    })
        finally:
            # Bug fix: the store was previously closed only on the happy
            # path, leaking the DB connection whenever a query raised.
            store.close()
        return _ok({
            "target_bpm": target_bpm,
            "tolerance": tolerance,
            "pool_size": len(pool),
            "returned": len(results),
            "samples": results
        })
    except Exception as e:
        return _err(f"Error selecting BPM coherent pool: {str(e)}")
@mcp.tool()
def warp_clip_to_bpm(ctx: Context, track_index: int, clip_index: int,
                     original_bpm: float, target_bpm: float) -> str:
    """Warp an audio clip from its original BPM to the target BPM.

    The remote script picks the warp mode (Complex Pro/Complex/Beats)
    automatically from the size of the BPM delta.

    Args:
        track_index: Track containing clip
        clip_index: Clip slot index
        original_bpm: Original sample BPM (from analysis)
        target_bpm: Target project BPM

    Returns:
        JSON with warp result including warp mode used.
    """
    params = {
        "track_index": track_index,
        "clip_index": clip_index,
        "original_bpm": original_bpm,
        "target_bpm": target_bpm,
    }
    # "auto_warp_sample" is the remote script's internal command name.
    resp = _send_to_ableton("auto_warp_sample", params,
                            timeout=TIMEOUTS["warp_clip_to_bpm"])
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error during warp"))
    result = resp.get("result", {})
    return _ok({
        "warped": result.get("warped", False),
        "warp_mode": result.get("warp_mode", "unknown"),
        "original_bpm": result.get("original_bpm", original_bpm),
        "target_bpm": result.get("target_bpm", target_bpm),
        "delta_pct": result.get("delta_pct", 0),
        "warp_factor": result.get("warp_factor", 1.0),
    })
# ==================================================================
# ADVANCED PRODUCTION TOOLS (Sprint 2 - Phase 1 & 2)
# ==================================================================
@mcp.tool()
def generate_complete_reggaeton(ctx: Context, bpm: float = 95, key: str = "Am",
                                style: str = "classic", structure: str = "verse-chorus",
                                use_samples: bool = True) -> str:
    """Generate a complete reggaeton project with all elements.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Reggaeton style (classic, dembow, perreo, moombahton)
        structure: Song structure (verse-chorus, full, intro-drop)
        use_samples: Whether to use samples from the library

    Returns:
        JSON with project summary: tracks created, samples used,
        arrangement, and duration in bars.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        outcome = ProductionWorkflow().generate_complete_reggaeton(
            bpm=bpm,
            key=key,
            style=style,
            structure=structure,
            use_samples=use_samples,
        )
        summary = {
            "project_type": "complete_reggaeton",
            "bpm": bpm,
            "key": key,
            "style": style,
            "structure": structure,
            "tracks_created": outcome.get("tracks", []),
            "samples_used": outcome.get("samples", {}),
            "arrangement": outcome.get("arrangement", {}),
            "duration_bars": outcome.get("duration_bars", 64),
        }
        return _ok(summary)
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as e:
        return _err(f"Error generating complete reggaeton: {str(e)}")
@mcp.tool()
def generate_from_reference(ctx: Context, reference_audio_path: str) -> str:
    """Generate a track using a reference audio file for style matching.

    Analyzes the reference audio via the reference_matcher engine, finds
    similar samples from the library, and generates a track with matching
    sonic characteristics.

    Args:
        reference_audio_path: Path to the reference audio file (.mp3, .wav)

    Returns:
        JSON with generated tracks info, matched samples, and similarity scores.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        if not os.path.isfile(reference_audio_path):
            return _err(f"Reference audio not found: {reference_audio_path}")
        outcome = ProductionWorkflow().generate_from_reference(reference_audio_path)
        summary = {"reference": reference_audio_path}
        # Merge dict results into the payload; wrap anything else.
        if isinstance(outcome, dict):
            summary.update(outcome)
        else:
            summary["result"] = outcome
        return _ok(summary)
    except ImportError as e:
        return _err(f"Required engine not available: {str(e)}")
    except Exception as e:
        return _err(f"Error generating from reference: {str(e)}")
@mcp.tool()
def load_sample_to_clip(ctx: Context, track_index: int, clip_index: int, sample_path: str) -> str:
    """Load an audio sample into a Session View clip slot.

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot
        sample_path: Absolute path to the audio file (.wav, .mp3)

    Returns:
        JSON with status of the load operation.
    """
    # Validate the file locally before asking Ableton to load it.
    if not os.path.isfile(sample_path):
        return _err(f"Sample not found: {sample_path}")
    params = {"track_index": track_index, "clip_index": clip_index, "sample_path": sample_path}
    resp = _send_to_ableton("load_sample_to_clip", params,
                            timeout=TIMEOUTS["load_sample_to_clip"])
    if resp.get("status") != "success":
        return _err(resp.get("message"))
    return _ok(resp)
@mcp.tool()
def duplicate_clip(ctx: Context, source_track: int, source_clip: int,
                   target_track: int, target_clip: int) -> str:
    """Duplicate/clone a clip from one Session View slot to another.

    Args:
        source_track: Source track index
        source_clip: Source clip slot index
        target_track: Target track index (can be same as source)
        target_clip: Target clip slot index

    Returns:
        JSON with duplication status and clip info.
    """
    params = {
        "source_track": source_track,
        "source_clip": source_clip,
        "target_track": target_track,
        "target_clip": target_clip,
    }
    resp = _send_to_ableton("duplicate_clip", params,
                            timeout=TIMEOUTS["duplicate_clip"])
    if resp.get("status") != "success":
        return _err(resp.get("message"))
    return _ok(resp)
@mcp.tool()
def load_sample_to_drum_rack(ctx: Context, track_index: int, sample_path: str,
                             pad_note: int = 36) -> str:
    """Load a sample into a specific pad (note) of a Drum Rack.

    Args:
        track_index: Index of the track containing the Drum Rack
        sample_path: Absolute path to the audio file
        pad_note: MIDI note number for the pad (default 36 = C1)

    Returns:
        JSON with status of the load operation.
    """
    # Fail fast if the file does not exist on this machine.
    if not os.path.isfile(sample_path):
        return _err(f"Sample not found: {sample_path}")
    resp = _send_to_ableton(
        "load_sample_to_drum_rack_pad",
        {"track_index": track_index, "pad_note": pad_note, "sample_path": sample_path},
        timeout=TIMEOUTS["load_sample_to_drum_rack"],
    )
    if resp.get("status") != "success":
        return _err(resp.get("message"))
    return _ok(resp)
@mcp.tool()
def set_warp_markers(ctx: Context, track_index: int, clip_index: int, markers: list) -> str:
    """Configure warp markers for an audio clip.

    Sets custom warp markers to adjust timing and groove of audio clips.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip
        markers: List of warp marker positions in bars [{"position": 0.0, "warp_to": 0.0}, ...]

    Returns:
        JSON with status and number of markers set.
    """
    resp = _send_to_ableton(
        "set_warp_markers",
        {"track_index": track_index, "clip_index": clip_index, "markers": markers},
        timeout=TIMEOUTS["set_warp_markers"]
    )
    if resp.get("status") != "success":
        return _err(resp.get("message"))
    # Echo back what was applied so the caller can confirm the markers.
    return _ok({
        "track_index": track_index,
        "clip_index": clip_index,
        "markers_set": len(markers),
        "markers": markers,
    })
@mcp.tool()
def reverse_clip(ctx: Context, track_index: int, clip_index: int) -> str:
    """Reverse an audio or MIDI clip.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip to reverse

    Returns:
        JSON with status of the reverse operation.
    """
    location = {"track_index": track_index, "clip_index": clip_index}
    return _proxy_ableton_command(
        "reverse_clip",
        dict(location),
        timeout=TIMEOUTS["reverse_clip"],
        defaults=dict(location),
    )
@mcp.tool()
def pitch_shift_clip(ctx: Context, track_index: int, clip_index: int, semitones: float) -> str:
    """Pitch shift a clip without affecting tempo (using Complex Pro).

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip
        semitones: Number of semitones to shift (positive or negative)

    Returns:
        JSON with new pitch value and status.
    """
    # Guard the two-octave range before touching Ableton.
    if not -24.0 <= semitones <= 24.0:
        return _err(f"Invalid pitch shift: {semitones}. Must be -24 to +24 semitones.")
    return _proxy_ableton_command(
        "pitch_shift_clip",
        {"track_index": track_index, "clip_index": clip_index, "semitones": semitones},
        timeout=TIMEOUTS["pitch_shift_clip"],
        # The defaults payload uses the remote script's key name.
        defaults={"track_index": track_index, "clip_index": clip_index,
                  "pitch_shift_semitones": semitones},
    )
@mcp.tool()
def time_stretch_clip(ctx: Context, track_index: int, clip_index: int, factor: float) -> str:
    """Time stretch a clip without affecting pitch.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip
        factor: Stretch factor (1.0 = normal, 2.0 = half speed/double length, 0.5 = double speed)

    Returns:
        JSON with new duration and status.
    """
    # Accept only a quarter-speed to quadruple-speed range.
    if not 0.25 <= factor <= 4.0:
        return _err(f"Invalid stretch factor: {factor}. Must be 0.25x to 4.0x.")
    return _proxy_ableton_command(
        "time_stretch_clip",
        {"track_index": track_index, "clip_index": clip_index, "factor": factor},
        timeout=TIMEOUTS["time_stretch_clip"],
        # The defaults payload uses the remote script's key name.
        defaults={"track_index": track_index, "clip_index": clip_index,
                  "stretch_factor": factor},
    )
@mcp.tool()
def slice_clip(ctx: Context, track_index: int, clip_index: int, num_slices: int = 8) -> str:
    """Slice an audio clip into multiple segments.

    Divides a clip into equal slices, useful for creating drum racks
    or rearranging audio segments.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip to slice
        num_slices: Number of slices to create (default 8, max 64)

    Returns:
        JSON with number of slices created and their positions.
    """
    if not 2 <= num_slices <= 64:
        return _err(f"Invalid number of slices: {num_slices}. Must be 2-64.")
    payload = {"track_index": track_index, "clip_index": clip_index, "num_slices": num_slices}
    return _proxy_ableton_command(
        "slice_clip",
        payload,
        timeout=TIMEOUTS["slice_clip"],
        defaults=dict(payload),
    )
# ==================================================================
# FASE 3: MIXING & EFFECTS
# ==================================================================
@mcp.tool()
def create_bus_track(ctx: Context, bus_type: str = "Group") -> str:
    """Create a group track (bus) for mixing."""
    payload = {"bus_type": bus_type}
    return _proxy_ableton_command(
        "create_bus_track",
        payload,
        timeout=TIMEOUTS["create_bus_track"],
        defaults=dict(payload),
    )
@mcp.tool()
def route_track_to_bus(ctx: Context, track_index: int, bus_name: str) -> str:
    """Route a track to a bus/group track."""
    payload = {"track_index": track_index, "bus_name": bus_name}
    return _proxy_ableton_command(
        "route_track_to_bus",
        payload,
        timeout=TIMEOUTS["route_track_to_bus"],
        defaults=dict(payload),
    )
@mcp.tool()
def create_return_track(ctx: Context, effect_type: str = "Reverb") -> str:
    """Create a return track with an effect."""
    try:
        from engines.mixing_engine import ReturnEffect, get_mixing_engine
        # Map loose user input ("ping pong delay") onto the enum member
        # naming convention (PING_PONG_DELAY).
        enum_key = effect_type.strip().upper().replace(" ", "_")
        if enum_key not in ReturnEffect.__members__:
            return _err(
                f"Unknown return effect '{effect_type}'. Available: {', '.join(ReturnEffect.__members__.keys())}"
            )
        track = get_mixing_engine().return_manager.create_return_track(ReturnEffect[enum_key])
        return _ok({
            "effect_type": effect_type,
            "return_index": int(track.track_index),
            "track_name": track.name,
            "parameters": track.effect_parameters,
        })
    except Exception as e:
        return _err(f"Error creating return track: {str(e)}")
@mcp.tool()
def set_track_send(ctx: Context, track_index: int, return_index: int, amount: float) -> str:
    """Configure send amount from a track to a return track."""
    if not 0.0 <= amount <= 1.0:
        return _err(f"Invalid send amount: {amount}. Must be 0.0-1.0.")
    try:
        from engines.mixing_engine import get_mixing_engine
        applied = get_mixing_engine().return_manager.set_track_send(track_index, return_index, amount)
        if not applied:
            return _err("Failed to set send")
        return _ok({"track_index": track_index, "return_index": return_index, "amount": amount})
    except Exception as e:
        return _err(f"Error setting track send: {str(e)}")
@mcp.tool()
def insert_device(ctx: Context, track_index: int, device_name: str) -> str:
    """Insert a device/plugin on a track."""
    resp = _send_to_ableton(
        "insert_device",
        {"track_index": track_index, "device_name": device_name},
        timeout=TIMEOUTS["insert_device"],
    )
    if resp.get("status") != "success":
        return _err(resp.get("message", "Failed to insert device"))
    return _ok({
        "track_index": track_index,
        "device": device_name,
        "device_index": resp.get("device_index"),
    })
@mcp.tool()
def configure_eq(ctx: Context, track_index: int, preset: str = "default") -> str:
    """Configure EQ Eight on a track with a preset."""
    payload = {"track_index": track_index, "preset": preset}
    return _proxy_ableton_command(
        "configure_eq",
        payload,
        timeout=TIMEOUTS["configure_eq"],
        defaults=dict(payload),
    )
@mcp.tool()
def configure_compressor(ctx: Context, track_index: int, preset: str = "default",
                         threshold: float = -20.0, ratio: float = 4.0) -> str:
    """Configure Compressor on a track."""
    try:
        from engines.mixing_engine import get_compression_settings
        # "default" means: no named preset, use the explicit settings.
        chosen_preset = None if preset == "default" else preset
        outcome = get_compression_settings().configure_compressor(
            track_index,
            threshold=threshold,
            ratio=ratio,
            preset=chosen_preset,
        )
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to configure compressor"))
        return _ok({
            "track_index": track_index,
            "preset": preset,
            "threshold": threshold,
            "ratio": ratio,
            "settings": outcome.get("settings", {}),
        })
    except Exception as e:
        return _err(f"Error configuring compressor: {str(e)}")
@mcp.tool()
def setup_sidechain(ctx: Context, source_track: int, target_track: int, amount: float = 0.5) -> str:
    """Setup sidechain compression from source track to target track."""
    if not 0.0 <= amount <= 1.0:
        return _err(f"Invalid sidechain amount: {amount}. Must be 0.0-1.0.")
    payload = {"source_track": source_track, "target_track": target_track, "amount": amount}
    return _proxy_ableton_command(
        "setup_sidechain",
        payload,
        timeout=TIMEOUTS["setup_sidechain"],
        defaults=dict(payload),
    )
@mcp.tool()
def auto_gain_staging(ctx: Context) -> str:
    """Automatically adjust gain staging for all tracks."""
    try:
        from engines.mixing_engine import get_gain_staging
        tracks_resp = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
        if tracks_resp.get("status") != "success":
            return _err(tracks_resp.get("message", "Failed to read tracks from Ableton"))
        # Build the engine's track-config shape; the track name doubles as
        # the role hint.
        track_config = []
        for t in _ableton_result(tracks_resp).get("tracks", []):
            track_config.append({
                "track_index": t.get("index", 0),
                "name": t.get("name", ""),
                "role": t.get("name", ""),
            })
        outcome = get_gain_staging().auto_gain_staging(track_config)
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to adjust gain staging"))
        return _ok({
            "tracks_adjusted": outcome.get("total_tracks", 0),
            "adjustments": outcome.get("applied_levels", []),
            "headroom_ok": outcome.get("headroom_ok", False),
        })
    except Exception as e:
        return _err(f"Error in auto gain staging: {str(e)}")
@mcp.tool()
def apply_master_chain(ctx: Context, preset: str = "standard") -> str:
    """Apply a mastering chain to the master track."""
    try:
        from engines.mixing_engine import get_master_chain
        # "standard" maps to the streaming-ready reggaeton preset.
        chain_preset = preset if preset != "standard" else "reggaeton_streaming"
        outcome = get_master_chain().apply_master_chain(chain_preset)
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to apply master chain"))
        return _ok({
            "preset": chain_preset,
            "devices_added": outcome.get("chain_applied", []),
            "master_track": "Master",
        })
    except Exception as e:
        return _err(f"Error applying master chain: {str(e)}")
@mcp.tool()
def create_parallel_compression(ctx: Context, track_index: int,
                                ratio: float = 4.0,
                                threshold: float = -20.0,
                                makeup_gain: float = 0.0,
                                preset: str = "",
                                name: str = "") -> str:
    """Create a New York-style parallel compression chain for punch and clarity.

    The original track stays uncompressed (dry) while a duplicate receives
    heavy compression (wet); blending the two adds punch without losing
    transients.

    Args:
        track_index: Index of the track to apply parallel compression
        ratio: Compression ratio (default 4.0). Ignored if preset is used.
        threshold: Threshold in dB (default -20.0). Ignored if preset is used.
        makeup_gain: Makeup gain in dB (default 0.0). Ignored if preset is used.
        preset: Preset name - "drum_parallel" (8:1, 2ms attack, 30ms release,
            35% wet), "vocal_parallel" (4:1, 8ms, 80ms, 45% wet), or
            "bus_parallel" (2:1, 15ms, 150ms, 25% wet)
        name: Optional custom name for the compression chain

    Returns:
        JSON with chain creation status, track indices, and settings.
    """
    try:
        payload = {
            "track_index": track_index,
            "ratio": ratio,
            "threshold": threshold,
            "makeup_gain": makeup_gain,
            "preset": preset,
            "name": name,
        }
        resp = _send_to_ableton("create_parallel_compression", payload,
                                timeout=TIMEOUTS["create_parallel_compression"])
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create parallel compression"))
        return _ok(resp.get("result", {}))
    except Exception as e:
        return _err(f"Error creating parallel compression: {str(e)}")
# ==================================================================
# FASE 4: WORKFLOW & EXPORT
# ==================================================================
@mcp.tool()
def export_project(ctx: Context, path: str, format: str = "wav") -> str:
    """Export the project to audio file."""
    try:
        from engines.workflow_engine import WorkflowEngine
        outcome = WorkflowEngine().export_project(path, format)
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to export project"))
        return _ok({
            "export_path": path,
            "format": format,
            "duration": outcome.get("duration"),
            "file_size": outcome.get("file_size"),
        })
    except Exception as e:
        return _err(f"Error exporting project: {str(e)}")
@mcp.tool()
def discover_device_parameters(ctx: Context, track_index: int, device_index: Optional[int] = None) -> str:
    """T090: Discover and enumerate all parameters for a device on a track.

    Agent 9: Device Parameter Discovery System

    This tool discovers all available parameters for a device, enabling
    intelligent parameter mapping and fuzzy matching for device control.

    Bug fix: the decorator was previously applied twice (stacked
    ``@mcp.tool()`` lines), wrapping/registering the tool twice; a single
    decorator is kept. The annotation is also corrected to Optional[int].

    Args:
        track_index: Index of the track containing the device
        device_index: Optional index of the device (if None, enumerates all devices)

    Returns:
        JSON with device information including:
        - track_index, track_name
        - device_count
        - devices: list with device_index, device_name, class_name, parameters
        - Each parameter includes: name, index, min, max, value, is_enabled
    """
    return _proxy_ableton_command(
        "discover_device_parameters",
        {"track_index": track_index, "device_index": device_index},
        timeout=TIMEOUTS.get("get_tracks", 15.0),
        defaults={"track_index": track_index, "device_index": device_index},
    )
# Bug fix: this function was defined without @mcp.tool() (the decorator
# intended for it was consumed by the function inserted above it), so it
# was never registered as an MCP tool.
@mcp.tool()
def get_project_summary(ctx: Context) -> str:
    """Get a summary of the current project from Ableton Live.

    Returns:
        JSON with track counts (total/MIDI/audio/return), clip-slot and
        scene totals, up to 20 distinct device names, tempo, and playback
        state. duration_minutes and project_name are static placeholders.
    """
    try:
        resp = _send_to_ableton("get_session_info", timeout=5.0)
        if resp.get("status") != "success":
            return _err(f"Cannot get session info: {resp.get('message')}")
        session = resp.get("result", {})
        tracks_resp = _send_to_ableton("get_tracks", timeout=5.0)
        tracks = tracks_resp.get("result", {}).get("tracks", []) if tracks_resp.get("status") == "success" else []
        midi_count = sum(1 for t in tracks if t.get("is_midi"))
        audio_count = sum(1 for t in tracks if t.get("is_audio"))
        # Deduplicate device names across all tracks.
        device_names = list(set(d for t in tracks for d in t.get("devices", [])))
        return _ok({
            "track_count": session.get("num_tracks", len(tracks)),
            "midi_tracks": midi_count,
            "audio_tracks": audio_count,
            "return_tracks": session.get("num_return_tracks", 0),
            "clips": sum(t.get("clip_slots", 0) for t in tracks),
            "scenes": session.get("num_scenes", 0),
            "devices_used": device_names[:20],
            "duration_minutes": 0,
            "project_name": "Live Project",
            "tempo": session.get("tempo", 0),
            "is_playing": session.get("is_playing", False),
        })
    except Exception as e:
        return _err(f"Error getting project summary: {str(e)}")
@mcp.tool()
def suggest_improvements(ctx: Context) -> str:
    """Get AI suggestions for improving the project."""
    try:
        from engines.workflow_engine import WorkflowEngine
        outcome = WorkflowEngine().suggest_improvements()
        return _ok({
            "suggestions": outcome.get("suggestions", []),
            "priority": outcome.get("priority", "medium"),
            "categories": outcome.get("categories", {}),
            "estimated_impact": outcome.get("estimated_impact", "medium"),
        })
    except Exception as e:
        return _err(f"Error generating suggestions: {str(e)}")
@mcp.tool()
def validate_project(ctx: Context) -> str:
    """Validate project consistency and best practices using live Ableton data.

    Returns:
        JSON with an is_valid flag, issue/warning/passed-check lists, a
        0-100 score (each issue costs 25 points, each warning 10), and
        track counts.
    """
    try:
        tracks_resp = _send_to_ableton("get_tracks", timeout=5.0)
        tracks = tracks_resp.get("result", {}).get("tracks", []) if tracks_resp.get("status") == "success" else []
        session_resp = _send_to_ableton("get_session_info", timeout=5.0)
        session = session_resp.get("result", {}) if session_resp.get("status") == "success" else {}
        issues = []
        warnings = []
        passed = []
        track_count = len(tracks)
        # Bug fix: classify tracks unconditionally. These lists were
        # previously bound only inside the non-empty branch, so the summary
        # counts below raised NameError for an empty project.
        midi_tracks = [t for t in tracks if t.get("is_midi")]
        audio_tracks = [t for t in tracks if t.get("is_audio")]
        if track_count == 0:
            issues.append("No tracks in project")
        else:
            passed.append(f"{track_count} tracks found")
            if not midi_tracks and not audio_tracks:
                warnings.append("All tracks appear to be return or master tracks")
        if session.get("tempo", 0) < 60 or session.get("tempo", 0) > 200:
            warnings.append(f"Unusual tempo: {session.get('tempo')} BPM")
        else:
            passed.append(f"Tempo OK: {session.get('tempo')} BPM")
        muted = [t["name"] for t in tracks if t.get("mute")]
        if muted:
            warnings.append(f"Muted tracks: {', '.join(muted)}")
        empty = [t["name"] for t in tracks if t.get("clip_slots", 0) == 0]
        if empty:
            warnings.append(f"Tracks with no clip slots: {', '.join(empty)}")
        score = max(0, 100 - len(issues) * 25 - len(warnings) * 10)
        return _ok({
            "is_valid": len(issues) == 0,
            "issues": issues,
            "warnings": warnings,
            "passed_checks": passed,
            "score": score,
            "track_count": track_count,
            "midi_count": len(midi_tracks),
            "audio_count": len(audio_tracks),
        })
    except Exception as e:
        return _err(f"Error validating project: {str(e)}")
@mcp.tool()
def humanize_track(ctx: Context, track_index: int, intensity: float = 0.5) -> str:
    """Apply humanization to a MIDI track (velocity and timing variations)."""
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    payload = {"track_index": track_index, "intensity": intensity}
    return _proxy_ableton_command(
        "humanize_track",
        payload,
        timeout=TIMEOUTS["humanize_track"],
        defaults=dict(payload),
    )
# ==================================================================
# FASE 5: PHASE 1 - BRIDGE ENGINES → ABLETON (T001-T015 + T081-T085)
# ==================================================================
# ------------------------------------------------------------------
# Production Pipeline Tools (T081-T085)
# ------------------------------------------------------------------
@mcp.tool()
def produce_reggaeton(ctx: Context, bpm: float = 95, key: str = "Am",
                      style: str = "classic", structure: str = "verse-chorus",
                      record_arrangement: bool = True) -> str:
    """Generate a complete reggaeton production pipeline (T081) - Session View based.

    DEPRECATED: Consider using build_arrangement_timeline() for direct
    Arrangement View creation. This tool builds Session View clips first;
    for timeline-based composition without that intermediate step, use
    build_arrangement_timeline().

    MIGRATION GUIDE:
        - OLD: produce_reggaeton() -> Session View clips -> manual arrangement
        - NEW: build_arrangement_timeline() -> Direct Arrangement View placement

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Reggaeton style (classic, dembow, perreo, moombahton)
        structure: Song structure (verse-chorus, full, intro-drop)
        record_arrangement: Record to Arrangement View automatically (default True)

    Returns:
        JSON with complete production summary.
    """
    try:
        logger.info("produce_reggaeton: start bpm=%s key=%s style=%s structure=%s", bpm, key, style, structure)
        from engines.production_workflow import ProductionWorkflow
        outcome = ProductionWorkflow().produce_reggaeton(
            bpm=bpm, key=key, style=style, structure=structure,
            record_arrangement=record_arrangement,
        )
        logger.info("produce_reggaeton: workflow returned")
        return _ok({
            "production_type": "reggaeton",
            "bpm": bpm,
            "key": key,
            "style": style,
            "structure": structure,
            "record_arrangement": record_arrangement,
            "tracks_created": outcome.get("tracks", []),
            "clips_generated": outcome.get("clips", []),
            "duration_bars": outcome.get("duration_bars", 64),
        })
    except ImportError:
        logger.exception("produce_reggaeton: import error")
        return _err("Production workflow engine not available.")
    except Exception as e:
        logger.exception("produce_reggaeton: failed")
        return _err(f"Error producing reggaeton: {str(e)}")
@mcp.tool()
def produce_from_reference(ctx: Context, audio_path: str) -> str:
    """Generate production from a reference audio file (T082).

    Analyzes the reference audio and generates a matching production.

    Args:
        audio_path: Path to the reference audio file (.mp3, .wav)

    Returns:
        JSON with production details and similarity analysis.
    """
    if not os.path.isfile(audio_path):
        return _err(f"Reference audio not found: {audio_path}")
    try:
        from engines.production_workflow import ProductionWorkflow
        outcome = ProductionWorkflow().produce_from_reference(reference_path=audio_path)
        summary = {"reference": audio_path, "production_type": "from_reference"}
        # Merge dict results into the payload; wrap anything else.
        if isinstance(outcome, dict):
            summary.update(outcome)
        else:
            summary["result"] = outcome
        return _ok(summary)
    except ImportError:
        return _err("Production workflow or reference matcher engine not available.")
    except Exception as e:
        return _err(f"Error producing from reference: {str(e)}")
@mcp.tool()
def produce_arrangement(ctx: Context, bpm: float = 95, key: str = "Am",
                        style: str = "classic") -> str:
    """Generate production directly in Arrangement View (T083).

    Creates a complete song structure in Arrangement View.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Production style (classic, modern, perreo, moombahton)

    Returns:
        JSON with arrangement details and clip positions.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        outcome = ProductionWorkflow().produce_arrangement(bpm=bpm, key=key, style=style)
        return _ok({
            "production_type": "arrangement",
            "view": "Arrangement",
            "bpm": bpm,
            "key": key,
            "style": style,
            "tracks_created": outcome.get("tracks", []),
            "clips_arranged": outcome.get("clips", []),
            "total_bars": outcome.get("total_bars", 128),
        })
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as e:
        return _err(f"Error producing arrangement: {str(e)}")
@mcp.tool()
def complete_production(ctx: Context, bpm: float = 95, key: str = "Am",
                        style: str = "classic", output_dir: str = "") -> str:
    """Complete production pipeline with render (T084).

    Generates a full production and renders it to audio.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Production style
        output_dir: Directory for rendered output (optional)

    Returns:
        JSON with production summary and render path.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        from engines.workflow_engine import WorkflowEngine
        outcome = ProductionWorkflow().complete_production(bpm=bpm, key=key, style=style)
        render_path = ""
        # Only render when a valid output directory was supplied.
        if output_dir and os.path.isdir(output_dir):
            target = os.path.join(output_dir, f"production_{int(time.time())}.wav")
            render = WorkflowEngine().export_project(path=target, format="wav")
            render_path = render.get("export_path", "")
        return _ok({
            "production_type": "complete",
            "bpm": bpm,
            "key": key,
            "style": style,
            "tracks_created": outcome.get("tracks", []),
            "clips_generated": outcome.get("clips", []),
            "render_path": render_path,
        })
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as e:
        return _err(f"Error in complete production: {str(e)}")
@mcp.tool()
def batch_produce(ctx: Context, count: int = 3, style: str = "classic",
                  bpm_range: str = "90-100") -> str:
    """Batch produce multiple songs (T085).

    Generates multiple productions with varying parameters.

    Args:
        count: Number of songs to produce (default 3, max 10)
        style: Production style
        bpm_range: BPM range as "min-max" string

    Returns:
        JSON with batch production summary.
    """
    if not 1 <= count <= 10:
        return _err(f"Invalid count: {count}. Must be 1-10.")
    try:
        from engines.production_workflow import ProductionWorkflow
        # Pick a tempo per song: random within "min-max", or fixed value.
        if "-" in bpm_range:
            parts = bpm_range.split("-")
            import random
            bpms = [random.randint(int(parts[0]), int(parts[1])) for _ in range(count)]
        else:
            bpms = [int(bpm_range)] * count
        keys = ["Am", "Dm", "Em", "Gm", "Cm"]
        workflow = ProductionWorkflow()
        productions = []
        for i, bpm in enumerate(bpms):
            chosen_key = keys[i % len(keys)]  # cycle through the key list
            outcome = workflow.produce_reggaeton(
                bpm=bpm,
                key=chosen_key,
                style=style,
                structure="verse-chorus",
            )
            productions.append({
                "index": i + 1,
                "bpm": bpm,
                "key": chosen_key,
                "tracks": len(outcome.get("tracks", [])),
            })
        return _ok({
            "batch_size": count,
            "style": style,
            "bpm_range": bpm_range,
            "productions": productions,
        })
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as e:
        return _err(f"Error in batch production: {str(e)}")
# ------------------------------------------------------------------
# MIDI Clip Generator Tools (T001-T005)
# ------------------------------------------------------------------
@mcp.tool()
def generate_midi_clip(ctx: Context, track_index: int, clip_index: int = 0,
                       notes: list = None) -> str:
    """Create a MIDI clip with specified notes (T001).

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        notes: List of note dicts with pitch, start_time, duration, velocity

    Returns:
        JSON with clip creation status.
    """
    if notes is None:
        notes = []
    try:
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": 4.0},
            timeout=TIMEOUTS["generate_midi_clip"]
        )
        # Surface clip-creation failures instead of silently reporting
        # success with "created_empty" (the previous fall-through).
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create clip"))
        if not notes:
            return _ok({
                "track_index": track_index,
                "clip_index": clip_index,
                "notes_added": 0,
                "created_empty": True,
            })
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_midi_clip"]
        )
        if resp2.get("status") != "success":
            return _err(resp2.get("message", "Failed to add notes"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating MIDI clip: {str(e)}")
@mcp.tool()
def generate_dembow_clip(ctx: Context, track_index: int, clip_index: int = 0,
                         bars: int = 4, variation: str = "standard") -> str:
    """Generate a dembow rhythm MIDI clip (T002).

    Creates a classic reggaeton dembow pattern. Each seed pattern spans
    2.0 beats and is tiled once per bar across the requested bar count.

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        variation: Pattern variation (standard, minimal, complex, fill)

    Returns:
        JSON with clip generation status.
    """
    try:
        # Seed patterns; pitches 36/38/42 are the kick/snare/closed-hat pads
        # used throughout this module.
        patterns = {
            "standard": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 0.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 0.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 0.75, "duration": 0.25, "velocity": 80},
                {"pitch": 36, "start_time": 1.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 1.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 1.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 1.75, "duration": 0.25, "velocity": 80},
            ],
            "minimal": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.5, "velocity": 100},
                {"pitch": 42, "start_time": 0.5, "duration": 0.5, "velocity": 80},
                {"pitch": 36, "start_time": 1.0, "duration": 0.5, "velocity": 100},
                {"pitch": 42, "start_time": 1.5, "duration": 0.5, "velocity": 80},
            ],
            "complex": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 0.125, "duration": 0.125, "velocity": 70},
                {"pitch": 42, "start_time": 0.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 0.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 0.625, "duration": 0.125, "velocity": 70},
                {"pitch": 42, "start_time": 0.75, "duration": 0.25, "velocity": 80},
                {"pitch": 36, "start_time": 1.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 1.125, "duration": 0.125, "velocity": 70},
                {"pitch": 42, "start_time": 1.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 1.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 1.75, "duration": 0.25, "velocity": 80},
            ],
            "fill": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.25, "velocity": 100},
                {"pitch": 38, "start_time": 0.25, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 0.5, "duration": 0.25, "velocity": 100},
                {"pitch": 38, "start_time": 0.75, "duration": 0.25, "velocity": 100},
            ],
        }
        notes = patterns.get(variation, patterns["standard"])
        # Tile the seed pattern, offsetting by 2.0 per bar.
        full_notes = []
        for bar in range(bars):
            for note in notes:
                full_notes.append({
                    "pitch": note["pitch"],
                    "start_time": note["start_time"] + (bar * 2.0),
                    "duration": note["duration"],
                    "velocity": note["velocity"],
                })
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_dembow_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create dembow clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": full_notes},
            timeout=TIMEOUTS["generate_dembow_clip"]
        )
        if resp2.get("status") != "success":
            # Bug fix: report the add-notes failure; previously the create
            # response's message was returned here, masking the real error.
            return _err(resp2.get("message", "Failed to add dembow notes"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "variation": variation,
            "bars": bars,
            "notes_added": len(full_notes),
        })
    except Exception as e:
        return _err(f"Error generating dembow clip: {str(e)}")
@mcp.tool()
def generate_bass_clip(ctx: Context, track_index: int, clip_index: int = 0,
                       bars: int = 4, root_notes: list = None, style: str = "standard") -> str:
    """Generate a bassline MIDI clip (T003).

    Creates a reggaeton-style bassline pattern.

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        root_notes: List of root note pitches (default [36, 36, 36, 36])
        style: Bass style (standard, melodic, staccato, slides)

    Returns:
        JSON with clip generation status.
    """
    if root_notes is None:
        root_notes = [36] * 4
    try:
        notes = []
        base_octave = 36  # fallback root (C1) if root_notes is empty
        for bar in range(bars):
            # Cycle through the supplied roots, one per bar.
            root = root_notes[bar % len(root_notes)] if root_notes else base_octave
            if style == "standard":
                notes.extend([
                    {"pitch": root, "start_time": bar * 2.0, "duration": 0.5, "velocity": 100},
                    {"pitch": root, "start_time": bar * 2.0 + 0.5, "duration": 0.5, "velocity": 90},
                    {"pitch": root, "start_time": bar * 2.0 + 1.0, "duration": 0.5, "velocity": 100},
                    {"pitch": root + 7, "start_time": bar * 2.0 + 1.5, "duration": 0.5, "velocity": 80},
                ])
            elif style == "melodic":
                notes.extend([
                    {"pitch": root, "start_time": bar * 2.0, "duration": 0.75, "velocity": 100},
                    {"pitch": root + 4, "start_time": bar * 2.0 + 0.75, "duration": 0.25, "velocity": 80},
                    {"pitch": root + 7, "start_time": bar * 2.0 + 1.0, "duration": 0.5, "velocity": 90},
                    {"pitch": root, "start_time": bar * 2.0 + 1.5, "duration": 0.5, "velocity": 85},
                ])
            elif style == "staccato":
                notes.extend([
                    {"pitch": root, "start_time": bar * 2.0, "duration": 0.125, "velocity": 110},
                    {"pitch": root, "start_time": bar * 2.0 + 0.5, "duration": 0.125, "velocity": 100},
                    {"pitch": root, "start_time": bar * 2.0 + 1.0, "duration": 0.125, "velocity": 110},
                    {"pitch": root, "start_time": bar * 2.0 + 1.5, "duration": 0.125, "velocity": 100},
                ])
            else:  # "slides" or any unrecognized style
                notes.extend([
                    {"pitch": root, "start_time": bar * 2.0, "duration": 1.0, "velocity": 100},
                    {"pitch": root + 12, "start_time": bar * 2.0 + 1.0, "duration": 0.25, "velocity": 90},
                    {"pitch": root, "start_time": bar * 2.0 + 1.5, "duration": 0.5, "velocity": 80},
                ])
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_bass_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create bass clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_bass_clip"]
        )
        if resp2.get("status") != "success":
            # Bug fix: report the add-notes failure; previously the create
            # response's message was returned here, masking the real error.
            return _err(resp2.get("message", "Failed to add bass notes"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "style": style,
            "bars": bars,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating bass clip: {str(e)}")
@mcp.tool()
def generate_chords_clip(ctx: Context, track_index: int, clip_index: int = 0,
                         bars: int = 4, progression: str = "i-v-vi-iv", key: str = "Am") -> str:
    """Generate a chord progression MIDI clip (T004).

    Creates chord patterns for reggaeton progressions.

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        progression: Roman numeral progression (default "i-v-vi-iv")
        key: Musical key (default Am)

    Returns:
        JSON with clip generation status.
    """
    try:
        # Semitone offsets from the key root for each supported progression.
        progressions = {
            "i-v-vi-iv": [0, 7, 9, 5],
            "i-iv-v": [0, 5, 7],
            "i-vi-iv-v": [0, 9, 5, 7],
            "i-v-i-v": [0, 7, 0, 7],
            "i-iv-i-v": [0, 5, 0, 7],
        }
        offsets = progressions.get(progression, progressions["i-v-vi-iv"])
        # Map the key's letter to a MIDI root; accidentals (#/b) are ignored,
        # matching the original letter-prefix behavior. Unknown letters fall
        # back to C.
        is_minor = key.endswith("m")
        minor_roots = {"C": 48, "D": 50, "E": 52, "F": 53, "G": 55, "A": 45, "B": 47}
        major_roots = {"C": 60, "D": 62, "E": 64, "F": 65, "G": 67, "A": 57, "B": 59}
        roots = minor_roots if is_minor else major_roots
        base_note = roots.get(key[:1], 48 if is_minor else 60)
        notes = []
        # Hold each chord for chord_length bars; if there are more chords
        # than bars, each chord gets a single bar.
        chord_length = bars // len(offsets) if bars >= len(offsets) else 1
        third = 3 if is_minor else 4  # minor vs major third interval
        for i, offset in enumerate(offsets):
            for bar in range(chord_length):
                root = base_note + offset
                chord_start = i * chord_length * 2.0 + bar * 2.0
                notes.extend([
                    {"pitch": root, "start_time": chord_start, "duration": 2.0, "velocity": 70},
                    {"pitch": root + third, "start_time": chord_start, "duration": 2.0, "velocity": 70},
                    {"pitch": root + 7, "start_time": chord_start, "duration": 2.0, "velocity": 70},
                ])
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_chords_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create chords clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_chords_clip"]
        )
        if resp2.get("status") != "success":
            # Bug fix: report the add-notes failure; previously the create
            # response's message was returned here, masking the real error.
            return _err(resp2.get("message", "Failed to add chord notes"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "progression": progression,
            "key": key,
            "bars": bars,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating chords clip: {str(e)}")
@mcp.tool()
def generate_melody_clip(ctx: Context, track_index: int, clip_index: int = 0,
                         bars: int = 4, scale: str = "minor", density: str = "medium") -> str:
    """Generate a melodic line MIDI clip (T005).

    Creates a melody pattern for reggaeton.

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        scale: Scale type (minor, major, harmonic_minor, pentatonic)
        density: Note density (sparse, medium, dense)

    Returns:
        JSON with clip generation status.
    """
    try:
        # Scale pitch sets rooted at middle C (60).
        scales = {
            "minor": [60, 62, 63, 65, 67, 68, 70, 72],
            "major": [60, 62, 64, 65, 67, 69, 71, 72],
            "harmonic_minor": [60, 62, 63, 65, 67, 68, 71, 72],
            "pentatonic": [60, 62, 64, 67, 69, 72],
        }
        scale_notes = scales.get(scale, scales["minor"])
        # Probability of placing a note on each 16th-note step.
        density_ratios = {"sparse": 0.25, "medium": 0.5, "dense": 0.75}
        ratio = density_ratios.get(density, 0.5)
        import random
        random.seed(42)  # fixed seed so repeated calls produce the same melody
        notes = []
        sixteenth = 2.0 / 16
        for bar in range(bars):
            for step in range(16):
                if random.random() < ratio:
                    note_pitch = random.choice(scale_notes)
                    start = bar * 2.0 + step * sixteenth
                    duration = sixteenth * random.choice([1, 2, 4])
                    velocity = random.randint(70, 110)
                    notes.append({
                        "pitch": note_pitch,
                        "start_time": start,
                        "duration": duration,
                        "velocity": velocity,
                    })
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_melody_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create melody clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_melody_clip"]
        )
        if resp2.get("status") != "success":
            # Bug fix: report the add-notes failure; previously the create
            # response's message was returned here, masking the real error.
            return _err(resp2.get("message", "Failed to add melody notes"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "scale": scale,
            "density": density,
            "bars": bars,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating melody clip: {str(e)}")
# ------------------------------------------------------------------
# Sample Management Tools (T006-T010)
# ------------------------------------------------------------------
@mcp.tool()
def load_samples_for_genre(ctx: Context, genre: str, key: str = "", bpm: float = 0) -> str:
    """Select and load samples for a genre (T008).

    This is an alias for select_samples_for_genre with additional auto-loading.

    Args:
        genre: Genre to select samples for
        key: Musical key filter (optional)
        bpm: BPM filter (optional)

    Returns:
        JSON with selected samples info.
    """
    try:
        from engines.sample_selector import get_selector
        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available. Check libreria/reggaeton path.")
        # Empty key / non-positive bpm mean "no filter".
        group = selector.select_for_genre(genre, key if key else None, bpm if bpm > 0 else None)
        result = {
            "genre": group.genre,
            "key": group.key,
            "bpm": group.bpm,
            "drums": {},
            "bass": [],
            "synths": [],
            "fx": [],
        }
        # Report each drum-kit slot that was actually resolved.
        kit = group.drums
        for slot in ("kick", "snare", "clap", "hat_closed", "hat_open"):
            sample = getattr(kit, slot, None)
            if sample:
                result["drums"][slot] = sample.name
        # Cap list sizes to keep the response compact.
        result["bass"] = [s.name for s in (group.bass or [])[:5]]
        result["synths"] = [s.name for s in (group.synths or [])[:5]]
        result["fx"] = [s.name for s in (group.fx or [])[:3]]
        return _ok(result)
    except ImportError:
        return _err("Sample selector engine not available.")
    except Exception as e:
        return _err(f"Error loading samples for genre: {str(e)}")
@mcp.tool()
def create_drum_kit(ctx: Context, track_index: int, kick_path: str = "",
                    snare_path: str = "", hat_path: str = "", clap_path: str = "") -> str:
    """Create a drum kit by loading samples into a Drum Rack (T009).

    Args:
        track_index: Index of the track containing the Drum Rack
        kick_path: Path to kick sample (optional)
        snare_path: Path to snare sample (optional)
        hat_path: Path to hi-hat sample (optional)
        clap_path: Path to clap sample (optional)

    Returns:
        JSON with kit creation status.
    """
    try:
        # Map each optional sample path to its Drum Rack pad note.
        pad_map = ((kick_path, 36), (snare_path, 38), (hat_path, 42), (clap_path, 39))
        loaded = []
        errors = []
        for sample_path, pad_note in pad_map:
            if not sample_path:
                continue  # slot not requested
            if not os.path.isfile(sample_path):
                errors.append({"note": pad_note, "error": f"File not found: {sample_path}"})
                continue
            reply = _send_to_ableton(
                "load_sample_to_drum_rack",
                {"track_index": track_index, "sample_path": sample_path, "pad_note": pad_note},
                timeout=TIMEOUTS["create_drum_kit"]
            )
            if reply.get("status") == "success":
                loaded.append({"note": pad_note, "path": sample_path})
            else:
                errors.append({"note": pad_note, "error": reply.get("message", "unknown")})
        return _ok({
            "track_index": track_index,
            "samples_loaded": len(loaded),
            "loaded": loaded,
            "errors": errors,
        })
    except Exception as e:
        return _err(f"Error creating drum kit: {str(e)}")
@mcp.tool()
def build_track_from_samples(ctx: Context, track_type: str = "drums",
                             sample_role: str = "drums") -> str:
    """Build a complete track from library samples (T010).

    Creates an audio track, names it after the track type, then loads up
    to four library samples matching the role into consecutive clip slots.

    Args:
        track_type: Type of track (drums, bass, melody, fx)
        sample_role: Sample role to filter by (drums, bass, synths, fx)

    Returns:
        JSON with track creation and sample loading status.
    """
    try:
        from engines.sample_selector import get_selector
        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available.")
        create_resp = _send_to_ableton(
            "create_audio_track",
            {"index": -1},
            timeout=TIMEOUTS["build_track_from_samples"]
        )
        if create_resp.get("status") != "success":
            return _err("Failed to create audio track")
        new_track = create_resp.get("track_index", -1)
        if new_track < 0:
            return _err("Invalid track index returned")
        _send_to_ableton(
            "set_track_name",
            {"track_index": new_track, "name": f"{track_type.title()} Track"},
            timeout=TIMEOUTS["build_track_from_samples"]
        )
        placed = []
        for slot, candidate in enumerate(selector.get_samples_by_role(sample_role)[:4]):
            slot_resp = _send_to_ableton(
                "load_sample_to_clip",
                {"track_index": new_track, "clip_index": slot, "sample_path": candidate.path},
                timeout=TIMEOUTS["build_track_from_samples"]
            )
            if slot_resp.get("status") == "success":
                placed.append({"index": slot, "sample": candidate.name})
        return _ok({
            "track_type": track_type,
            "track_index": new_track,
            "samples_loaded": len(placed),
            "samples": placed,
        })
    except ImportError:
        return _err("Sample selector engine not available.")
    except Exception as e:
        return _err(f"Error building track from samples: {str(e)}")
# ------------------------------------------------------------------
# Configuration-Based Generators (T011-T015)
# ------------------------------------------------------------------
@mcp.tool()
def generate_full_song(ctx: Context, bpm: float = 95, key: str = "Am",
                       style: str = "classic", structure: str = "standard") -> str:
    """Generate a complete song with multiple elements (T011).

    This is an enhanced version that creates drums, bass, chords, and melody.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Song style (classic, modern, perreo, moombahton)
        structure: Song structure (standard, verse-chorus, full)

    Returns:
        JSON with song generation summary.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        outcome = ProductionWorkflow().generate_song(
            genre="reggaeton",
            bpm=bpm,
            key=key,
            style=style,
            structure=structure
        )
        summary = {
            "song_type": "full",
            "bpm": bpm,
            "key": key,
            "style": style,
            "structure": structure,
            "tracks_created": outcome.get("tracks", []),
            "clips_generated": outcome.get("clips", []),
            "duration_bars": outcome.get("duration_bars", 128),
        }
        return _ok(summary)
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as e:
        return _err(f"Error generating full song: {str(e)}")
@mcp.tool()
def generate_track_from_config(ctx: Context, track_config_json: str) -> str:
    """Generate a track from a JSON configuration (T012).

    Flexible track generation using a configuration object.

    Args:
        track_config_json: JSON string with track configuration
            Example: '{"type": "drums", "pattern": "dembow", "bars": 8}'

    Returns:
        JSON with track generation status.
    """
    try:
        import json as json_lib
        config = json_lib.loads(track_config_json)
        track_type = config.get("type", "drums")
        resp = _send_to_ableton(
            "create_midi_track",
            {"index": -1},
            timeout=TIMEOUTS["generate_track_from_config"]
        )
        if resp.get("status") != "success":
            return _err("Failed to create MIDI track")
        track_index = resp.get("track_index", -1)
        # Robustness: validate the returned index before using it (matches
        # the check done in build_track_from_samples).
        if track_index < 0:
            return _err("Invalid track index returned")
        _send_to_ableton(
            "set_track_name",
            {"track_index": track_index, "name": config.get("name", f"{track_type.title()} Track")},
            timeout=TIMEOUTS["generate_track_from_config"]
        )
        # Delegate clip generation to the matching specialized tool.
        if track_type == "drums":
            pattern = config.get("pattern", "dembow")
            bars = config.get("bars", 4)
            if pattern == "dembow":
                return generate_dembow_clip(ctx, track_index, 0, bars, "standard")
        elif track_type == "bass":
            bars = config.get("bars", 4)
            root_notes = config.get("root_notes", [36])
            style = config.get("style", "standard")
            return generate_bass_clip(ctx, track_index, 0, bars, root_notes, style)
        elif track_type == "chords":
            bars = config.get("bars", 4)
            progression = config.get("progression", "i-v-vi-iv")
            key = config.get("key", "Am")
            return generate_chords_clip(ctx, track_index, 0, bars, progression, key)
        elif track_type == "melody":
            bars = config.get("bars", 4)
            scale = config.get("scale", "minor")
            density = config.get("density", "medium")
            return generate_melody_clip(ctx, track_index, 0, bars, scale, density)
        # Unknown type (or drums with a non-dembow pattern): the named
        # empty track is still created, so report it.
        return _ok({
            "track_type": track_type,
            "track_index": track_index,
            "config": config,
            "status": "created",
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error generating track from config: {str(e)}")
@mcp.tool()
def generate_section(ctx: Context, section_config_json: str, start_bar: int = 0) -> str:
    """Generate a song section from JSON config (T013).

    Creates a section (verse, chorus, intro, etc.) at the specified position.

    Args:
        section_config_json: JSON string with section configuration
            Example: '{"type": "verse", "bars": 16, "elements": ["drums", "bass"]}'
        start_bar: Starting bar position in the song

    Returns:
        JSON with section generation status.
    """
    try:
        import json as json_lib
        spec = json_lib.loads(section_config_json)
        section_type = spec.get("type", "verse")
        section_bars = spec.get("bars", 8)
        element_names = spec.get("elements", ["drums"])
        produced = []
        for name in element_names:
            # Build a per-element track config and delegate track creation.
            track_spec = {
                "type": name,
                "bars": section_bars,
                "name": f"{section_type.title()} {name.title()}",
            }
            if name == "drums":
                track_spec["pattern"] = "dembow"
            outcome = generate_track_from_config(ctx, json_lib.dumps(track_spec))
            produced.append({"element": name, "result": outcome})
        return _ok({
            "section_type": section_type,
            "start_bar": start_bar,
            "bars": section_bars,
            "elements": element_names,
            "tracks_created": len(produced),
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error generating section: {str(e)}")
@mcp.tool()
def apply_human_feel(ctx: Context, track_index: int, intensity: float = 0.5) -> str:
    """Apply humanization feel to a MIDI track (T014).

    Adds velocity and timing variations for a more natural feel.

    Args:
        track_index: Index of the track to humanize
        intensity: Humanization intensity 0.0-1.0 (default 0.5)

    Returns:
        JSON with humanization status.
    """
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    try:
        reply = _send_to_ableton(
            "humanize_track",
            {"track_index": track_index, "intensity": intensity},
            timeout=TIMEOUTS["apply_human_feel"]
        )
        if reply.get("status") != "success":
            return _err(reply.get("message", "Failed to apply human feel"))
        return _ok({
            "track_index": track_index,
            "intensity": intensity,
            "notes_affected": reply.get("notes_affected", 0),
            "velocity_variation": reply.get("velocity_variation", 0),
            "timing_variation": reply.get("timing_variation", 0),
        })
    except Exception as e:
        return _err(f"Error applying human feel: {str(e)}")
@mcp.tool()
def add_percussion_fills(ctx: Context, track_index: int, positions: list = None) -> str:
    """Add percussion fills at specified positions (T015).

    Inserts drum fills at specific bars in the arrangement.

    Args:
        track_index: Index of the percussion track
        positions: List of bar positions for fills (default [7, 15, 23, 31])

    Returns:
        JSON with fills addition status.
    """
    if positions is None:
        positions = [7, 15, 23, 31]
    try:
        # One-bar snare/hat/kick fill, shifted to each requested position.
        base_fill = [
            {"pitch": 38, "start_time": 0.0, "duration": 0.125, "velocity": 110},
            {"pitch": 42, "start_time": 0.25, "duration": 0.125, "velocity": 100},
            {"pitch": 38, "start_time": 0.5, "duration": 0.125, "velocity": 110},
            {"pitch": 36, "start_time": 0.75, "duration": 0.125, "velocity": 120},
        ]
        applied = []
        for bar_pos in positions:
            shifted = [
                {
                    "pitch": hit["pitch"],
                    "start_time": hit["start_time"] + bar_pos * 2.0,
                    "duration": hit["duration"],
                    "velocity": hit["velocity"],
                }
                for hit in base_fill
            ]
            reply = _send_to_ableton(
                "add_notes_to_clip",
                {"track_index": track_index, "clip_index": 0, "notes": shifted},
                timeout=TIMEOUTS["add_percussion_fills"]
            )
            if reply.get("status") == "success":
                applied.append({"position": bar_pos, "notes": len(shifted)})
        return _ok({
            "track_index": track_index,
            "fills_added": len(applied),
            "positions": positions,
            "details": applied,
        })
    except Exception as e:
        return _err(f"Error adding percussion fills: {str(e)}")
# ==================================================================
# FASE 6: PHASE 2 - ARRANGEMENT & AUTOMATION (T021-T026)
# ==================================================================
@mcp.tool()
def build_arrangement_structure(ctx: Context, song_config: str) -> str:
    """Build a complete arrangement structure (T021).

    Creates song sections and arranges them in Arrangement View.

    Args:
        song_config: JSON string with song configuration
            Example: '{"sections": [{"type": "intro", "bars": 8}, {"type": "verse", "bars": 16}]}'

    Returns:
        JSON with arrangement structure status.
    """
    try:
        import json as json_lib
        parsed = json_lib.loads(song_config)
        cursor = 0  # running bar position of the next section
        built = []
        for spec in parsed.get("sections", []):
            kind = spec.get("type", "verse")
            length = spec.get("bars", 8)
            payload = json_lib.dumps({
                "type": kind,
                "bars": length,
                "elements": spec.get("elements", ["drums", "bass"]),
            })
            outcome = generate_section(ctx, payload, cursor)
            built.append({
                "type": kind,
                "start_bar": cursor,
                "bars": length,
                "result": outcome,
            })
            cursor += length
        return _ok({
            "total_sections": len(built),
            "total_bars": cursor,
            "sections": built,
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error building arrangement structure: {str(e)}")
@mcp.tool()
def create_arrangement_midi_clip(ctx: Context, track_index: int, start_time: float = 0.0,
                                 length: float = 4.0, notes: list = None) -> str:
    """Create a MIDI clip in Arrangement View (T023).

    Args:
        track_index: Index of the target track
        start_time: Start position in bars
        length: Clip length in bars
        notes: List of MIDI notes to add

    Returns:
        JSON with clip creation status.
    """
    note_list = [] if notes is None else notes
    try:
        reply = _send_to_ableton(
            "create_arrangement_midi_clip",
            {"track_index": track_index, "start_time": start_time, "length": length, "notes": note_list},
            timeout=TIMEOUTS["create_arrangement_midi_clip"]
        )
        if reply.get("status") != "success":
            return _err(reply.get("message", "Failed to create arrangement MIDI clip"))
        return _ok({
            "track_index": track_index,
            "start_time": start_time,
            "length": length,
            "notes_added": len(note_list),
            "view": "Arrangement",
        })
    except Exception as e:
        return _err(f"Error creating arrangement MIDI clip: {str(e)}")
@mcp.tool()
def create_arrangement_audio_clip(ctx: Context, track_index: int, sample_path: str,
                                  start_time: float = 0.0, length: float = 4.0) -> str:
    """Create an audio clip in Arrangement View (T024).

    Args:
        track_index: Index of the target audio track
        sample_path: Absolute path to the audio file
        start_time: Start position in bars
        length: Clip length in bars

    Returns:
        JSON with clip creation status.
    """
    # Validate the file locally before bothering Ableton.
    if not os.path.isfile(sample_path):
        return _err(f"Sample not found: {sample_path}")
    try:
        reply = _send_to_ableton(
            "create_arrangement_audio_clip",
            {"track_index": track_index, "sample_path": sample_path, "start_time": start_time, "length": length},
            timeout=TIMEOUTS["create_arrangement_audio_clip"]
        )
        if reply.get("status") != "success":
            return _err(reply.get("message", "Failed to create arrangement audio clip"))
        return _ok({
            "track_index": track_index,
            "sample_path": sample_path,
            "start_time": start_time,
            "length": length,
            "view": "Arrangement",
        })
    except Exception as e:
        return _err(f"Error creating arrangement audio clip: {str(e)}")
@mcp.tool()
def fill_arrangement_with_song(ctx: Context, song_config: str) -> str:
    """Fill the entire arrangement with a complete song (T025).

    Populates Arrangement View with all song elements.

    Args:
        song_config: JSON string with complete song configuration
            Example: '{"bpm": 95, "key": "Am", "style": "classic", "duration": 128}'

    Returns:
        JSON with song arrangement status.
    """
    try:
        import json as json_lib
        song = json_lib.loads(song_config)
        bpm = song.get("bpm", 95)
        key = song.get("key", "Am")
        style = song.get("style", "classic")
        duration = song.get("duration", 128)
        tempo_reply = _send_to_ableton("set_tempo", {"tempo": bpm}, timeout=10.0)
        if tempo_reply.get("status") != "success":
            return _err("Failed to set tempo")
        # Fixed intro/verse/chorus/outro layout applied to every song.
        layout = [
            {"type": "intro", "bars": 8, "elements": ["drums", "bass"]},
            {"type": "verse", "bars": 16, "elements": ["drums", "bass", "chords"]},
            {"type": "chorus", "bars": 16, "elements": ["drums", "bass", "chords", "melody"]},
            {"type": "verse", "bars": 16, "elements": ["drums", "bass", "chords"]},
            {"type": "chorus", "bars": 16, "elements": ["drums", "bass", "chords", "melody"]},
            {"type": "outro", "bars": 8, "elements": ["drums", "bass"]},
        ]
        arrangement = build_arrangement_structure(ctx, json_lib.dumps({"sections": layout}))
        return _ok({
            "bpm": bpm,
            "key": key,
            "style": style,
            "duration_bars": duration,
            "arrangement_result": arrangement,
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error filling arrangement: {str(e)}")
@mcp.tool()
def generate_advanced_chords(ctx: Context, root: str = "C", chord_type: str = "maj9",
                             track_index: Optional[int] = None, octave: int = 4,
                             voicing: str = "default", progression_roots: Optional[list] = None,
                             progression_types: Optional[list] = None, bar_length: float = 4.0,
                             start_bar: float = 0.0) -> str:
    """Generate advanced extended chords with professional voice leading (Agente 13).

    Creates rich harmonic content with extended chords (9ths, 11ths, 13ths),
    suspended chords, and altered dominants. Includes intelligent voice leading
    options like drop-2, drop-3, and minimal movement between chords.

    Args:
        root: Root note for single chord (e.g., 'C', 'F#', 'Bb')
        chord_type: Chord quality - 'maj9', 'min9', 'dom9', 'maj11', 'min11',
                    'maj13', 'min13', 'dom13', 'sus2', 'sus4', '7sus4',
                    '7b5', '7b9', '7#9', '7#11', '7b13', 'alt'
        track_index: Optional track index to create MIDI clip on
        octave: Octave number (4 = middle C, default)
        voicing: Voice leading type - 'default', 'drop2', 'drop3', 'open', 'minimal'
        progression_roots: List of roots for chord progression (e.g., ['C', 'F', 'G', 'C'])
        progression_types: Parallel list of chord types (e.g., ['maj9', 'maj11', 'dom13', 'maj9'])
        bar_length: Length of each chord in bars
        start_bar: Starting bar position for arrangement clip

    Returns:
        JSON with chord notes, MIDI data, and optional clip creation status.
    """
    try:
        # CHORD_STRUCTURES was imported but never used; only the engine and
        # the category listing are needed here.
        from engines.harmony_engine import ExtendedChordsEngine, CHORD_CATEGORIES
        engine = ExtendedChordsEngine()
        result = {
            "chord_type": chord_type,
            "voicing": voicing,
            "octave": octave,
        }
        # Generate either a full progression or a single chord.
        if progression_roots and progression_types:
            chords = engine.generate_chord_progression(
                roots=progression_roots,
                chord_types=progression_types,
                voicing=voicing
            )
            result["progression"] = chords
            result["chord_count"] = len(chords)
            notes = []
            for i, chord in enumerate(chords):
                for midi_note in chord["midi_notes"]:
                    notes.append({
                        "pitch": midi_note,
                        "start_time": start_bar * 2.0 + i * bar_length * 2.0,
                        "duration": bar_length * 2.0,
                        "velocity": 80,
                    })
            result["total_notes"] = len(notes)
        else:
            chord = engine.generate_extended_chord(root, chord_type, octave, voicing)
            result["chord"] = chord
            notes = []
            for midi_note in chord["midi_notes"]:
                notes.append({
                    "pitch": midi_note,
                    "start_time": start_bar * 2.0,
                    "duration": bar_length * 2.0,
                    "velocity": 80,
                })
            result["total_notes"] = len(notes)
        # Add available chord types info
        result["available_categories"] = CHORD_CATEGORIES
        result["available_types"] = engine.get_available_chord_types()
        # Create MIDI clip if track_index provided
        if track_index is not None:
            # Bug fix: size the clip to hold every generated note. A
            # progression previously overflowed a clip only one chord long.
            clip_length = max((n["start_time"] + n["duration"] for n in notes),
                              default=bar_length * 2.0)
            resp = _send_to_ableton(
                "create_clip",
                {"track_index": track_index, "clip_index": 0, "length": clip_length},
                timeout=TIMEOUTS["generate_advanced_chords"]
            )
            if resp.get("status") == "success":
                resp2 = _send_to_ableton(
                    "add_notes_to_clip",
                    {"track_index": track_index, "clip_index": 0, "notes": notes},
                    timeout=TIMEOUTS["generate_advanced_chords"]
                )
                if resp2.get("status") == "success":
                    result["clip_created"] = True
                    result["track_index"] = track_index
                    result["notes_added"] = len(notes)
                else:
                    result["clip_created"] = False
                    result["clip_error"] = resp2.get("message", "Failed to add notes")
            else:
                result["clip_created"] = False
                result["clip_error"] = resp.get("message", "Failed to create clip")
        return _ok(result)
    except Exception as e:
        return _err(f"Error generating advanced chords: {str(e)}")
@mcp.tool()
def automate_filter(ctx: Context, track_index: int, start_bar: float = 0.0,
                    end_bar: float = 8.0, start_freq: float = 200.0,
                    end_freq: float = 20000.0,
                    curve_type: str = "s_curve") -> str:
    """Automate a filter sweep on a track (T026).

    Creates automation for filter frequency from start to end.

    Args:
        track_index: Index of the target track
        start_bar: Start bar for automation
        end_bar: End bar for automation
        start_freq: Starting filter frequency in Hz
        end_freq: Ending filter frequency in Hz
        curve_type: Type of interpolation curve ("linear", "bezier", "s_curve",
                    "exponential", "stepped"). Default: "s_curve"

    Returns:
        JSON with automation creation status.
    """
    # The command payload and the fallback defaults carry the same values;
    # pass an independent copy as the defaults.
    sweep_params = {
        "track_index": track_index,
        "start_bar": start_bar,
        "end_bar": end_bar,
        "start_freq": start_freq,
        "end_freq": end_freq,
        "curve_type": curve_type,
    }
    return _proxy_ableton_command(
        "automate_filter",
        sweep_params,
        timeout=TIMEOUTS["automate_filter"],
        defaults=dict(sweep_params),
    )
@mcp.tool()
def generate_curve_automation(ctx: Context, track_index: int, parameter: str,
                              points: list, curve_type: str = "linear",
                              grid_quantization: str = "none") -> str:
    """Generate advanced automation with curve interpolation (Agente 6).

    Creates automation curves using various interpolation methods.

    Args:
        track_index: Index of the target track
        parameter: Parameter name to automate (e.g., "volume", "filter_freq")
        points: List of {time: float, value: float} control points
        curve_type: Interpolation type ("linear", "bezier", "s_curve",
                    "exponential", "stepped")
        grid_quantization: Grid size ("1/4", "1/8", "1/16", "1/32", "none")
    Returns:
        JSON with automation points and status.
    """
    try:
        from engines.curve_interpolation import (
            generate_curve, CurveConfig, CurveType,
            GridQuantization, AutomationPoint
        )
        # Several user-facing spellings map onto each engine enum value.
        curve_aliases = {
            "linear": CurveType.LINEAR,
            "bezier": CurveType.BEZIER,
            "s_curve": CurveType.S_CURVE,
            "s-curve": CurveType.S_CURVE,
            "scurve": CurveType.S_CURVE,
            "exponential": CurveType.EXPONENTIAL,
            "exp": CurveType.EXPONENTIAL,
            "stepped": CurveType.STEPPED,
            "steps": CurveType.STEPPED,
        }
        grid_aliases = {
            "none": None,
            "1/4": GridQuantization.QUARTER,
            "quarter": GridQuantization.QUARTER,
            "1/8": GridQuantization.EIGHTH,
            "eighth": GridQuantization.EIGHTH,
            "1/16": GridQuantization.SIXTEENTH,
            "sixteenth": GridQuantization.SIXTEENTH,
            "1/32": GridQuantization.THIRTYSECOND,
            "thirtysecond": GridQuantization.THIRTYSECOND,
        }
        # Unknown spellings silently fall back to LINEAR / no quantization.
        config = CurveConfig(
            curve_type=curve_aliases.get(curve_type.lower(), CurveType.LINEAR),
            quantize_grid=grid_aliases.get(grid_quantization.lower(), None),
        )
        # Control points arrive as dicts; the engine expects (time, value) pairs.
        control_points = [(p.get("time", 0.0), p.get("value", 0.0)) for p in points]
        generated = generate_curve(control_points, config)
        serialized = [pt.to_dict() for pt in generated]
        return _ok({
            "track_index": track_index,
            "parameter": parameter,
            "curve_type": curve_type,
            "grid_quantization": grid_quantization,
            "points_generated": len(serialized),
            "automation_points": serialized[:50],  # Limit to first 50 points
            "note": "Automation envelope generated. Apply to Ableton clip to use.",
        })
    except ImportError:
        return _err("Curve interpolation engine not available.")
    except Exception as e:
        return _err(f"Error generating curve automation: {str(e)}")
# ==================================================================
# AGENTE 5: MULTI-PARAMETER AUTOMATION (live_bridge exposure)
# ==================================================================
@mcp.tool()
def add_parameter_automation(ctx: Context, track_index: int, parameter_name: str,
                             points: list, device_name: str = "",
                             clip_index: Optional[int] = None, send_index: Optional[int] = None) -> str:
    """Add automation envelope to track parameters (volume, pan, device params, sends).
    Agente 5: Exposes live_bridge.add_automation() for multi-parameter automation.
    Supports track-level automation (volume, pan, sends) and clip/device automation.
    Args:
        track_index: Index of the target track
        parameter_name: Name of parameter to automate ("volume", "pan", "device_param", etc.)
        points: List of [time, value] pairs where time is in beats and value is 0.0-1.0
        device_name: Name of device (only for device_param automation, e.g., "EQ Eight")
        clip_index: Clip index (only for clip automation)
        send_index: Send index (only for send automation, 0-based)
    Returns:
        JSON with automation creation status.
    Examples:
        # Volume automation (track level)
        add_parameter_automation(track_index=0, parameter_name="volume",
                                points=[[0.0, 0.8], [4.0, 1.0], [8.0, 0.6]])
        # Pan automation (track level)
        add_parameter_automation(track_index=1, parameter_name="pan",
                                points=[[0.0, 0.0], [8.0, -0.5], [16.0, 0.5]])
        # Send automation
        add_parameter_automation(track_index=0, parameter_name="send",
                                points=[[0.0, 0.0], [4.0, 0.5]], send_index=0)
        # Device parameter automation
        add_parameter_automation(track_index=0, parameter_name="Frequency",
                                points=[[0.0, 200.0], [8.0, 20000.0]],
                                device_name="Auto Filter", clip_index=0)
    """
    # Mandatory fields are always sent; optional routing fields are only
    # forwarded when the caller supplied them explicitly.
    params = {
        "track_index": track_index,
        "parameter_name": parameter_name,
        "points": points,
    }
    if device_name:
        params["device_name"] = device_name
    if clip_index is not None:
        params["clip_index"] = clip_index
    if send_index is not None:
        params["send_index"] = send_index
    # defaults reports only a summary (points_count, not the full point list).
    return _proxy_ableton_command(
        "add_parameter_automation",
        params,
        timeout=TIMEOUTS["add_parameter_automation"],
        defaults={
            "track_index": track_index,
            "parameter_name": parameter_name,
            "points_count": len(points),
        },
    )
# ==================================================================
# FASE 2.5: FX CREATOR TOOLS (T031-T035) - Exposición de arrangement_engine
# ==================================================================
@mcp.tool()
def create_riser(ctx: Context, track_index: int, start_bar: int,
                 duration: int = 8, intensity: float = 0.8,
                 pitch_min: int = 36, pitch_max: int = 84) -> str:
    """Create a riser/buildup effect (T031).

    Generates a pre-drop riser with ascending pitch and tension, useful
    before choruses or drops.

    Args:
        track_index: Index of the target track
        start_bar: Start bar for the riser
        duration: Duration in bars (default 8)
        intensity: Intensity 0.0-1.0 (default 0.8)
        pitch_min: Minimum MIDI pitch (default 36 = C2)
        pitch_max: Maximum MIDI pitch (default 84 = C6)
    Returns:
        JSON with riser creation status and clip info.
    """
    request = {
        "track_index": track_index,
        "start_bar": start_bar,
        "duration": duration,
        "intensity": intensity,
        "pitch_range": [pitch_min, pitch_max],
    }
    # Echo-back defaults cover the scalar arguments only (no pitch_range).
    fallback = {key: request[key]
                for key in ("track_index", "start_bar", "duration", "intensity")}
    return _proxy_ableton_command("create_riser", request, timeout=30.0,
                                  defaults=fallback)
@mcp.tool()
def create_downlifter(ctx: Context, track_index: int, start_bar: int,
                      duration: int = 4, intensity: float = 0.7,
                      pitch_start: int = 72, pitch_end: int = 36) -> str:
    """Create a downlifter effect (T032).

    Generates a post-drop downlifter with descending pitch, useful for
    energy release after drops or impacts.

    Args:
        track_index: Index of the target track
        start_bar: Start bar for the downlifter
        duration: Duration in bars (default 4)
        intensity: Intensity 0.0-1.0 (default 0.7)
        pitch_start: Starting MIDI pitch (default 72 = C5)
        pitch_end: Ending MIDI pitch (default 36 = C2)
    Returns:
        JSON with downlifter creation status and clip info.
    """
    request = {
        "track_index": track_index,
        "start_bar": start_bar,
        "duration": duration,
        "intensity": intensity,
        "pitch_range": [pitch_start, pitch_end],
    }
    # Echo-back defaults cover the scalar arguments only (no pitch_range).
    fallback = {key: request[key]
                for key in ("track_index", "start_bar", "duration", "intensity")}
    return _proxy_ableton_command("create_downlifter", request, timeout=30.0,
                                  defaults=fallback)
@mcp.tool()
def create_impact(ctx: Context, track_index: int, position: float,
                  intensity: float = 1.0, impact_type: str = "hit") -> str:
    """Create an impact FX (T033).

    Generates impact effects (hit, crash, sub drop, noise).
    Perfect for emphasizing drops, transitions, or beats.

    Args:
        track_index: Index of the target track
        position: Position in bars (int) or beats (float)
        intensity: Intensity 0.0-1.0 (default 1.0)
        impact_type: Type of impact - "hit", "crash", "sub_drop", "noise"
    Returns:
        JSON with impact creation status, or an error for an unknown impact_type.
    """
    # Validate up front (consistent with create_fx_hit / create_transition_fill)
    # instead of forwarding an unknown type to Ableton.
    valid_types = ["hit", "crash", "sub_drop", "noise"]
    if impact_type not in valid_types:
        return _err(f"Invalid impact_type: {impact_type}. Must be one of: {', '.join(valid_types)}")
    return _proxy_ableton_command(
        "create_impact",
        {
            "track_index": track_index,
            "position": position,
            "intensity": intensity,
            "impact_type": impact_type,
        },
        timeout=30.0,
        defaults={
            "track_index": track_index,
            "position": position,
            "intensity": intensity,
            "impact_type": impact_type,
        },
    )
@mcp.tool()
def create_silence(ctx: Context, track_index: int, start_bar: int,
                   duration: int = 1) -> str:
    """Create silence/break effect (T034).

    Generates a moment of silence for dramatic effect, useful for
    creating tension before drops.

    Args:
        track_index: Index of the target track (for context)
        start_bar: Start bar for the silence
        duration: Duration in bars (default 1)
    Returns:
        JSON with silence creation status.
    """
    payload = {
        "track_index": track_index,
        "start_bar": start_bar,
        "duration": duration,
    }
    return _proxy_ableton_command("create_silence", payload, timeout=30.0,
                                  defaults=dict(payload))
@mcp.tool()
def create_fx_section(ctx: Context, section_type: str, start_bar: int,
                      duration: int = 8, track_indices: Optional[list] = None) -> str:
    """Create complete FX section (T035).

    Generates a complete FX section with risers, impacts, and transitions.

    Args:
        section_type: Type - "pre_drop", "post_drop", "transition", "build"
        start_bar: Start bar for the section
        duration: Duration in bars (default 8)
        track_indices: List of track indices to apply FX (optional)
    Returns:
        JSON with FX section creation status, or an error for an unknown
        section_type.
    """
    # Validate up front (consistent with create_fx_hit / create_transition_fill)
    # instead of forwarding an unknown type to Ableton.
    valid_types = ["pre_drop", "post_drop", "transition", "build"]
    if section_type not in valid_types:
        return _err(f"Invalid section_type: {section_type}. Must be one of: {', '.join(valid_types)}")
    return _proxy_ableton_command(
        "create_fx_section",
        {
            "section_type": section_type,
            "start_bar": start_bar,
            "duration": duration,
            # None means "no specific tracks"; normalize to an empty list.
            "track_indices": track_indices or [],
        },
        timeout=30.0,
        defaults={
            "section_type": section_type,
            "start_bar": start_bar,
            "duration": duration,
        },
    )
# ==================================================================
# AGENTE 3: TRANSITIONS & FILLS (Exposición de pattern_library.py)
# ==================================================================
@mcp.tool()
def create_fx_hit(ctx: Context, track_index: int, position: float,
                  fx_type: str = "riser", duration: float = 2.0) -> str:
    """Create an FX hit at a specific position.

    Generates single FX hits like risers, downers, impacts, crashes, and
    sweeps. Uses PercussionLibrary.get_fx_hit() from pattern_library.py.

    Args:
        track_index: Index of the target track
        position: Position in beats (float) or bars (int)
        fx_type: Type of FX - "riser", "downer", "impact", "crash", "sweep" (default "riser")
        duration: Duration of the FX in beats (default 2.0)
    Returns:
        JSON with FX hit creation status and note details.
    """
    valid_types = ("riser", "downer", "impact", "crash", "sweep")
    # Reject unknown FX types before touching Ableton.
    if fx_type not in valid_types:
        return _err(f"Invalid fx_type: {fx_type}. Must be one of: {', '.join(valid_types)}")
    payload = {
        "track_index": track_index,
        "position": position,
        "fx_type": fx_type,
        "duration": duration,
    }
    return _proxy_ableton_command("create_fx_hit", payload, timeout=30.0,
                                  defaults=dict(payload))
@mcp.tool()
def create_transition_fill(ctx: Context, track_index: int, position: float,
                           fill_type: str = "break") -> str:
    """Create a transition fill at a specific position.

    Generates transition fills for breaks, builds, drops, and impacts.
    Uses PercussionLibrary.get_transition_fill() from pattern_library.py.

    Args:
        track_index: Index of the target track
        position: Position in beats (float) or bars (int)
        fill_type: Type of fill - "break", "build", "drop", "impact" (default "break")
    Returns:
        JSON with transition fill creation status and note details.
    """
    valid_types = ("break", "build", "drop", "impact")
    # Reject unknown fill types before touching Ableton.
    if fill_type not in valid_types:
        return _err(f"Invalid fill_type: {fill_type}. Must be one of: {', '.join(valid_types)}")
    payload = {
        "track_index": track_index,
        "position": position,
        "fill_type": fill_type,
    }
    return _proxy_ableton_command("create_transition_fill", payload, timeout=30.0,
                                  defaults=dict(payload))
@mcp.tool()
def create_intro_buildup(ctx: Context, track_index: int, bars: int = 4) -> str:
    """Create an intro buildup section.

    Generates a buildup pattern for intros with increasing density and a
    final riser. Uses PercussionLibrary.get_intro_buildup() from
    pattern_library.py.

    Args:
        track_index: Index of the target track
        bars: Number of bars for the buildup (default 4)
    Returns:
        JSON with intro buildup creation status and note details.
    """
    payload = {"track_index": track_index, "bars": bars}
    return _proxy_ableton_command("create_intro_buildup", payload, timeout=30.0,
                                  defaults=dict(payload))
@mcp.tool()
def create_fx_automation(ctx: Context, track_index: int, fx_type: str,
                         section: int = 0) -> str:
    """Create FX automation on a track (T050).

    Uses workflow_engine.create_fx_automation() to create parameter
    automation for various effects types.

    Args:
        track_index: Index of the target track
        fx_type: Type of FX automation - "filter_sweep", "reverb_duck",
                 "delay_wash", or "volume_fade"
        section: Section index to apply automation to (default 0)
    Returns:
        JSON with automation creation status and parameter details.
    """
    valid_fx_types = ["filter_sweep", "reverb_duck", "delay_wash", "volume_fade"]
    # Reject unknown automation types before invoking the engine.
    if fx_type not in valid_fx_types:
        return _err(
            f"Invalid fx_type: {fx_type}. Must be one of: {', '.join(valid_fx_types)}"
        )
    try:
        from engines.workflow_engine import WorkflowEngine
        outcome = WorkflowEngine().create_fx_automation(
            track_index=track_index,
            fx_type=fx_type,
            section=section
        )
        if outcome.get("status") != "success":
            return _err(outcome.get("message", "Failed to create FX automation"))
        # Hoist the nested payload once instead of re-fetching it per field.
        automation = outcome.get("automation", {})
        return _ok({
            "track_index": track_index,
            "fx_type": fx_type,
            "section": section,
            "automation": automation,
            "description": automation.get("description", ""),
            "points_count": len(automation.get("automation_points", [])),
        })
    except ImportError:
        return _err("Workflow engine not available.")
    except Exception as e:
        return _err(f"Error creating FX automation: {str(e)}")
# ==================================================================
# AGENTE 4: WHITE NOISE GENERATOR
# ==================================================================
@mcp.tool()
def create_white_noise(ctx: Context, duration: float = 4.0, sample_rate: int = 44100,
                       effect_type: str = "basic", start_freq: float = 200.0,
                       end_freq: float = 8000.0) -> str:
    """Generate programmatic white noise with effects applied (Agente 4).

    Creates white noise programmatically (without loading external samples)
    and applies effects such as filter sweeps and volume envelopes.

    Args:
        duration: Duration in seconds (default 4.0)
        sample_rate: Sample rate (default 44100 Hz)
        effect_type: Effect type - "basic", "riser", "downlifter", "sweep"
        start_freq: Initial sweep frequency (default 200 Hz)
        end_freq: Final sweep frequency (default 8000 Hz)
    Returns:
        JSON with information about the generated file and applied parameters.
    """
    try:
        from engines.noise_generator import WhiteNoiseGenerator, get_noise_generator
        generator = get_noise_generator()
        # The base noise clip is always rendered first; "basic" and "sweep"
        # consume it, while riser/downlifter render their own clip.
        base_clip = generator.generate_white_noise(duration, sample_rate)
        if effect_type == "basic":
            rendered = base_clip
        elif effect_type == "riser":
            rendered = generator.create_riser_effect(duration, sample_rate, start_freq, end_freq)
        elif effect_type == "downlifter":
            # NOTE(review): frequencies are passed swapped (end, start) here,
            # presumably for a high-to-low sweep — confirm against the engine.
            rendered = generator.create_downlifter_effect(duration, sample_rate, end_freq, start_freq)
        elif effect_type == "sweep":
            rendered = generator.apply_filter_sweep(base_clip, start_freq, end_freq)
        else:
            return _err(f"Invalid effect_type: {effect_type}. Must be 'basic', 'riser', 'downlifter', or 'sweep'")
        return _ok({
            "file_path": rendered.get("file_path"),
            "duration": rendered.get("duration"),
            "sample_rate": rendered.get("sample_rate"),
            "effect_type": effect_type,
            "type": rendered.get("type", "white_noise"),
            "description": rendered.get("description", "Generated white noise"),
        })
    except ImportError:
        return _err("Noise generator engine not available.")
    except Exception as e:
        return _err(f"Error generating white noise: {str(e)}")
# ==================================================================
# FASE 3: INTELIGENCIA MUSICAL (T041-T060)
# ==================================================================
@mcp.tool()
def analyze_project_key(ctx: Context) -> str:
    """Detect the predominant key of the current project (T041)."""
    return _proxy_ableton_command(
        "analyze_project_key",
        timeout=TIMEOUTS["analyze_project_key"],
    )
@mcp.tool()
def harmonize_track(ctx: Context, track_index: int, progression: str = "I-V-vi-IV") -> str:
    """Harmonize a track with a chord progression (T042).

    Args:
        track_index: Index of the track to harmonize
        progression: Chord progression (e.g. "I-V-vi-IV", "ii-V-I", "I-IV-V")
    """
    payload = {"track_index": track_index, "progression": progression}
    return _proxy_ableton_command(
        "harmonize_track",
        payload,
        timeout=TIMEOUTS["harmonize_track"],
        defaults=dict(payload),
    )
@mcp.tool()
def generate_counter_melody(ctx: Context, main_melody_track: int) -> str:
    """Generate a counter-melody that complements the main melody (T043).

    Args:
        main_melody_track: Index of the track holding the main melody
    """
    payload = {"main_melody_track": main_melody_track}
    return _proxy_ableton_command(
        "generate_counter_melody",
        payload,
        timeout=TIMEOUTS["generate_counter_melody"],
        defaults=dict(payload),
    )
@mcp.tool()
def detect_energy_curve(ctx: Context) -> str:
    """Analyze the per-section energy curve of the project (T044)."""
    return _proxy_ableton_command(
        "detect_energy_curve",
        timeout=TIMEOUTS["detect_energy_curve"],
    )
@mcp.tool()
def balance_sections(ctx: Context) -> str:
    """Automatically balance the energy between sections (T045)."""
    return _proxy_ableton_command(
        "balance_sections",
        timeout=TIMEOUTS["balance_sections"],
    )
@mcp.tool()
def variate_loop(ctx: Context, track_index: int, intensity: float = 0.5) -> str:
    """Create variations of a loop to avoid repetitiveness (T046).

    Args:
        track_index: Index of the track holding the loop
        intensity: Variation intensity (0.0-1.0)
    """
    # Reject out-of-range intensities before touching Ableton.
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    payload = {"track_index": track_index, "intensity": intensity}
    return _proxy_ableton_command(
        "variate_loop",
        payload,
        timeout=TIMEOUTS["variate_loop"],
        defaults=dict(payload),
    )
@mcp.tool()
def add_call_and_response(ctx: Context, phrase_track: int, response_length: int = 2) -> str:
    """Generate a musical response to an existing phrase (T047).

    Args:
        phrase_track: Index of the track holding the original phrase
        response_length: Length of the response in bars
    """
    payload = {"phrase_track": phrase_track, "response_length": response_length}
    return _proxy_ableton_command(
        "add_call_and_response",
        payload,
        timeout=TIMEOUTS["add_call_and_response"],
        defaults=dict(payload),
    )
@mcp.tool()
def generate_breakdown(ctx: Context, start_bar: int, duration: int = 8) -> str:
    """Generate a breakdown/rest section (T048).

    Args:
        start_bar: Bar where the breakdown starts
        duration: Duration in bars (default 8)
    """
    payload = {"start_bar": start_bar, "duration": duration}
    return _proxy_ableton_command(
        "generate_breakdown",
        payload,
        timeout=TIMEOUTS["generate_breakdown"],
        defaults=dict(payload),
    )
@mcp.tool()
def generate_drop_variation(ctx: Context, original_drop_bar: int, variation_type: str = "intense") -> str:
    """Generate a variation of an existing drop (T049).

    Args:
        original_drop_bar: Bar where the original drop sits
        variation_type: Variation type ("intense", "minimal", "double", "fill")
    """
    payload = {"original_drop_bar": original_drop_bar, "variation_type": variation_type}
    return _proxy_ableton_command(
        "generate_drop_variation",
        payload,
        timeout=TIMEOUTS["generate_drop_variation"],
        defaults=dict(payload),
    )
@mcp.tool()
def create_outro(ctx: Context, fade_duration: int = 8) -> str:
    """Create an outro with automatic fade out (T050).

    Args:
        fade_duration: Fade duration in bars
    """
    payload = {"fade_duration": fade_duration}
    return _proxy_ableton_command(
        "create_outro",
        payload,
        timeout=TIMEOUTS["create_outro"],
        defaults=dict(payload),
    )
# ==================================================================
# FASE 4: WORKFLOW Y PRODUCCION (T061-T080)
# ==================================================================
@mcp.tool()
def load_preset(ctx: Context, preset_name: str) -> str:
    """Load a preset into the current project (T062).

    Args:
        preset_name: Name of the preset to load
    """
    try:
        from engines.workflow_engine import WorkflowEngine
        outcome = WorkflowEngine().load_preset(preset_name)
        # Guard clause: surface the engine's failure message directly.
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to load preset"))
        return _ok({
            "preset_name": preset_name,
            "tracks_loaded": outcome.get("tracks_loaded", 0),
            "devices_loaded": outcome.get("devices_loaded", 0),
            "samples_loaded": outcome.get("samples_loaded", []),
        })
    except Exception as e:
        return _err(f"Error loading preset: {str(e)}")
@mcp.tool()
def save_as_preset(ctx: Context, name: str, description: str = "") -> str:
    """Save the current project as a preset (T063).

    Args:
        name: Preset name
        description: Optional description
    """
    try:
        from engines.workflow_engine import WorkflowEngine
        outcome = WorkflowEngine().save_as_preset(name, description)
        # Guard clause: surface the engine's failure message directly.
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to save preset"))
        return _ok({
            "preset_name": name,
            "description": description,
            "saved_path": outcome.get("path"),
            "tracks_included": outcome.get("tracks_included", 0),
        })
    except Exception as e:
        return _err(f"Error saving preset: {str(e)}")
@mcp.tool()
def list_presets(ctx: Context) -> str:
    """List all available presets (T064)."""
    try:
        from engines.workflow_engine import WorkflowEngine
        listing = WorkflowEngine().list_presets()
        return _ok({
            "presets": listing.get("presets", []),
            "total_count": listing.get("count", 0),
            "categories": listing.get("categories", []),
        })
    except Exception as e:
        return _err(f"Error listing presets: {str(e)}")
@mcp.tool()
def create_custom_preset(ctx: Context, name: str, description: str = "") -> str:
    """Create a custom preset from scratch (T065).

    Args:
        name: Preset name
        description: Preset description
    """
    try:
        from engines.workflow_engine import WorkflowEngine
        outcome = WorkflowEngine().create_custom_preset(name, description)
        # Guard clause: surface the engine's failure message directly.
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to create preset"))
        return _ok({
            "preset_name": name,
            "description": description,
            "template_created": True,
            "base_tracks": outcome.get("base_tracks", []),
        })
    except Exception as e:
        return _err(f"Error creating custom preset: {str(e)}")
@mcp.tool()
def render_stems(ctx: Context, output_dir: str) -> str:
    """Render individual stems for external mixing (T066).

    Args:
        output_dir: Output directory for the stems
    """
    payload = {"output_dir": output_dir}
    return _proxy_ableton_command(
        "render_stems",
        payload,
        timeout=TIMEOUTS["render_stems"],
        defaults=dict(payload),
    )
@mcp.tool()
def render_full_mix(ctx: Context, output_path: str) -> str:
    """Render the full mastered mix (T067).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "render_full_mix",
        payload,
        timeout=TIMEOUTS["render_full_mix"],
        defaults=dict(payload),
    )
@mcp.tool()
def render_instrumental(ctx: Context, output_path: str) -> str:
    """Render an instrumental version (without vocal tracks) (T068).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "render_instrumental",
        payload,
        timeout=TIMEOUTS["render_instrumental"],
        defaults=dict(payload),
    )
@mcp.tool()
def full_quality_check(ctx: Context) -> str:
    """Run a complete quality check on the project (T071)."""
    return _proxy_ableton_command(
        "full_quality_check",
        timeout=TIMEOUTS["full_quality_check"],
    )
@mcp.tool()
def fix_quality_issues(ctx: Context, issues: Optional[list] = None) -> str:
    """Automatically fix the detected quality problems (T072).

    Args:
        issues: List of specific issues to fix (default: all)
    """
    # None (not a mutable default) means "fix everything"; normalize to [].
    if issues is None:
        issues = []
    return _proxy_ableton_command(
        "fix_quality_issues",
        {"issues": issues},
        timeout=TIMEOUTS["fix_quality_issues"],
        defaults={"issues": issues},
    )
@mcp.tool()
def duplicate_project(ctx: Context, new_name: str) -> str:
    """Duplicate the current project under a new name (T076).

    Args:
        new_name: Name for the duplicated project
    """
    payload = {"new_name": new_name}
    return _proxy_ableton_command(
        "duplicate_project",
        payload,
        timeout=TIMEOUTS["duplicate_project"],
        defaults=dict(payload),
    )
@mcp.tool()
def create_radio_edit(ctx: Context, output_path: str) -> str:
    """Create a radio-edit version (short, without long intros) (T078).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "create_radio_edit",
        payload,
        timeout=TIMEOUTS["create_radio_edit"],
        defaults=dict(payload),
    )
@mcp.tool()
def create_dj_edit(ctx: Context, output_path: str) -> str:
    """Create a DJ-edit version (extended intro/outro, cue points) (T079).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "create_dj_edit",
        payload,
        timeout=TIMEOUTS["create_dj_edit"],
        defaults=dict(payload),
    )
# ==================================================================
# FASES 6-9: Session Orchestrator + Warp Automation + Full MIDI Orchestration + MCP Tools
# ==================================================================
@mcp.tool()
def analyze_all_bpm(ctx: Context, force_reanalyze: bool = False) -> str:
    """
    Analyze BPM of all samples in library (800+) using librosa.
    Stores results in SQLite metadata store.

    Args:
        force_reanalyze: Reanalyze even if already in database
    Returns:
        JSON with analysis counters (total, analyzed, errors).
    """
    try:
        # Imported lazily so the server starts even without the analyzer stack.
        # (The unused `analyze_sample` import was dropped.)
        from engines.bpm_analyzer import BPMAnalyzer
        analyzer = BPMAnalyzer()
        result = analyzer.analyze_all_library(force_reanalyze=force_reanalyze)
        return _ok({
            "total_samples": result.get("total_samples", 0),
            "analyzed": result.get("analyzed", 0),
            "errors": result.get("errors", 0),
            "metadata_store_updated": True,
            "force_reanalyze": force_reanalyze,
        })
    except ImportError:
        return _err("BPM analyzer engine not available.")
    except Exception as e:
        return _err(f"Error analyzing library BPM: {str(e)}")
@mcp.tool()
def validate_session(ctx: Context) -> str:
    """
    Validate all MIDI tracks in Session View have instruments loaded.
    Reports which tracks need fixing.
    """
    try:
        resp = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
        if resp.get("status") != "success":
            return _err("Failed to get tracks from Ableton")
        tracks = resp.get("result", {}).get("tracks", [])
        # A MIDI track with zero devices has no instrument to play its notes.
        offenders = [
            {
                "index": t.get("index"),
                "name": t.get("name", f"Track {t.get('index')}"),
                "issue": "No instruments loaded",
            }
            for t in tracks
            if t.get("is_midi") and t.get("device_count", 0) == 0
        ]
        return _ok({
            "valid": not offenders,
            "midi_tracks_checked": sum(1 for t in tracks if t.get("is_midi")),
            "tracks_needing_fix": offenders,
            "total_issues": len(offenders),
        })
    except Exception as e:
        return _err(f"Error validating session: {str(e)}")
@mcp.tool()
def fix_session_midi_tracks(ctx: Context) -> str:
    """
    Auto-fix MIDI tracks by loading appropriate instruments.
    Detects track type from name (Piano -> Grand Piano, etc.)
    """
    try:
        resp = _send_to_ableton("fix_session_midi_tracks", timeout=30.0)
        # Guard clause: propagate the remote error message on failure.
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to fix session MIDI tracks"))
        fixed = resp.get("result", {}).get("fixed_tracks", [])
        return _ok({
            "fixed_count": len(fixed),
            "fixed_tracks": fixed,
            "message": f"Fixed {len(fixed)} MIDI tracks with instruments",
        })
    except Exception as e:
        return _err(f"Error fixing session MIDI tracks: {str(e)}")
@mcp.tool()
def select_bpm_coherent_pool(ctx: Context, target_bpm: int = 95,
                             tolerance: int = 5, pool_size: int = 20) -> str:
    """
    Select samples that match target BPM within tolerance.
    Uses librosa-analyzed BPM data from metadata store.

    Args:
        target_bpm: Target tempo (default 95)
        tolerance: BPM tolerance (default ±5)
        pool_size: Number of samples to return
    Returns:
        JSON listing the matching samples with their BPM deviation.
    """
    try:
        from engines.bpm_analyzer import BPMAnalyzer
        analyzer = BPMAnalyzer()
        pool = analyzer.select_bpm_coherent_pool(
            target_bpm=target_bpm,
            tolerance=tolerance,
            pool_size=pool_size
        )
        samples = []
        for s in pool:
            bpm = s.get("bpm")
            # A stored entry may carry bpm=None (analysis failed). dict.get's
            # default only applies when the key is MISSING, so guard explicitly
            # to avoid TypeError in abs(None - target_bpm); report deviation 0.
            numeric_bpm = bpm if isinstance(bpm, (int, float)) else target_bpm
            samples.append({
                "path": s.get("path"),
                "name": s.get("name"),
                "bpm": bpm,
                "role": s.get("role"),
                "deviation": abs(numeric_bpm - target_bpm),
            })
        return _ok({
            "target_bpm": target_bpm,
            "tolerance": tolerance,
            "pool_size": len(pool),
            "samples": samples,
        })
    except ImportError:
        return _err("BPM analyzer engine not available.")
    except Exception as e:
        return _err(f"Error selecting BPM coherent pool: {str(e)}")
@mcp.tool()
def warp_clip_to_bpm(ctx: Context, track_index: int, clip_index: int,
                     original_bpm: float, target_bpm: float) -> str:
    """
    Warp audio clip from original BPM to target BPM.
    Automatically selects warp mode (Complex Pro/Complex/Beats).

    Args:
        track_index: Track containing clip
        clip_index: Clip slot index
        original_bpm: Original sample BPM (from analysis)
        target_bpm: Target project BPM
    """
    try:
        resp = _send_to_ableton(
            "auto_warp_sample",
            {
                "track_index": track_index,
                "clip_index": clip_index,
                "original_bpm": original_bpm,
                "target_bpm": target_bpm,
            },
            timeout=15.0
        )
        # Guard clause: propagate the remote error message on failure.
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to warp clip"))
        result = resp.get("result", {})
        summary = {
            "warped": result.get("warped", False),
            "track_index": track_index,
            "clip_index": clip_index,
        }
        # The remaining fields are echoed straight from the remote result.
        for key in ("original_bpm", "target_bpm", "warp_factor",
                    "warp_mode", "delta_pct"):
            summary[key] = result.get(key)
        return _ok(summary)
    except Exception as e:
        return _err(f"Error warping clip: {str(e)}")
# ==================================================================
# FASE 5: INTEGRACION FINAL (T081-T100)
# ==================================================================
@mcp.tool()
def help(ctx: Context, tool_name: str = "") -> str:
    """List every available tool, or return detailed help for one tool (T096).

    Args:
        tool_name: Name of the tool to get detailed help for (optional).
            When empty, all tools are listed grouped by category.

    Returns:
        A JSON string produced by _ok()/_err(): either a single tool-help
        record (or a list of records on a fuzzy match), or the full
        category-grouped listing of all tools.
    """
    # Hand-maintained registry of every exposed tool: description, category,
    # parameter specs, and a usage example.
    # NOTE(review): this table is kept in sync with the @mcp.tool()
    # definitions manually -- verify entries whenever tools are added/removed.
    tools_db = {
        # Info
        "get_session_info": {"description": "Obtiene informacion completa de la sesion actual de Ableton Live", "category": "Info", "params": [], "example": "get_session_info()"},
        "get_tracks": {"description": "Obtiene la lista de todas las pistas del proyecto", "category": "Info", "params": [], "example": "get_tracks()"},
        "get_scenes": {"description": "Obtiene la lista de todas las escenas en Session View", "category": "Info", "params": [], "example": "get_scenes()"},
        "get_master_info": {"description": "Obtiene informacion de la pista master", "category": "Info", "params": [], "example": "get_master_info()"},
        "health_check": {"description": "Verificacion completa del sistema (5 chequeos, score 0-5). EJECUTAR PRIMERO", "category": "Info", "params": [], "example": "health_check()"},
        # Transport
        "start_playback": {"description": "Inicia la reproduccion", "category": "Transport", "params": [], "example": "start_playback()"},
        "stop_playback": {"description": "Detiene la reproduccion", "category": "Transport", "params": [], "example": "stop_playback()"},
        "toggle_playback": {"description": "Alterna reproduccion/parada", "category": "Transport", "params": [], "example": "toggle_playback()"},
        "stop_all_clips": {"description": "Detiene todos los clips en Session View", "category": "Transport", "params": [], "example": "stop_all_clips()"},
        # Settings
        "set_tempo": {"description": "Establece el tempo del proyecto en BPM", "category": "Settings", "params": [{"name": "tempo", "type": "float", "range": "20-300"}], "example": "set_tempo(tempo=95)"},
        "set_time_signature": {"description": "Establece la firma de tiempo", "category": "Settings", "params": [{"name": "numerator", "type": "int", "default": 4}, {"name": "denominator", "type": "int", "default": 4}], "example": "set_time_signature(numerator=4, denominator=4)"},
        "set_metronome": {"description": "Activa o desactiva el metronomo", "category": "Settings", "params": [{"name": "enabled", "type": "bool"}], "example": "set_metronome(enabled=True)"},
        # Tracks
        "create_midi_track": {"description": "Crea una nueva pista MIDI", "category": "Tracks", "params": [{"name": "index", "type": "int", "default": -1}], "example": "create_midi_track(index=-1)"},
        "create_audio_track": {"description": "Crea una nueva pista de audio", "category": "Tracks", "params": [{"name": "index", "type": "int", "default": -1}], "example": "create_audio_track(index=-1)"},
        "set_track_name": {"description": "Establece el nombre de una pista", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "name", "type": "str"}], "example": "set_track_name(track_index=0, name='Drums')"},
        "set_track_volume": {"description": "Establece el volumen de una pista (0.0-1.0)", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "volume", "type": "float", "range": "0.0-1.0"}], "example": "set_track_volume(track_index=0, volume=0.8)"},
        "set_track_pan": {"description": "Establece el paneo de una pista (-1.0 a 1.0)", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "pan", "type": "float", "range": "-1.0 a 1.0"}], "example": "set_track_pan(track_index=0, pan=0.0)"},
        "set_track_mute": {"description": "Silencia o reactiva una pista", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "mute", "type": "bool"}], "example": "set_track_mute(track_index=0, mute=True)"},
        "set_track_solo": {"description": "Activa o desactiva solo en una pista", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "solo", "type": "bool"}], "example": "set_track_solo(track_index=0, solo=True)"},
        "set_master_volume": {"description": "Establece el volumen master (0.0-1.0)", "category": "Tracks", "params": [{"name": "volume", "type": "float", "range": "0.0-1.0"}], "example": "set_master_volume(volume=0.8)"},
        # Clips
        "create_clip": {"description": "Crea un clip MIDI en Session View", "category": "Clips", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "length", "type": "float", "default": 4.0}], "example": "create_clip(track_index=0, clip_index=0, length=4.0)"},
        "add_notes_to_clip": {"description": "Aniade notas MIDI a un clip", "category": "Clips", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "notes", "type": "list"}], "example": "add_notes_to_clip(track_index=0, clip_index=0, notes=[{'pitch':36,'start_time':0.0,'duration':0.25,'velocity':100}])"},
        "fire_clip": {"description": "Dispara un clip en Session View", "category": "Clips", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}], "example": "fire_clip(track_index=0, clip_index=0)"},
        "fire_scene": {"description": "Dispara una escena completa", "category": "Clips", "params": [{"name": "scene_index", "type": "int"}], "example": "fire_scene(scene_index=0)"},
        "set_scene_name": {"description": "Establece el nombre de una escena", "category": "Clips", "params": [{"name": "scene_index", "type": "int"}, {"name": "name", "type": "str"}], "example": "set_scene_name(scene_index=0, name='Verse')"},
        "create_scene": {"description": "Crea una nueva escena", "category": "Clips", "params": [{"name": "index", "type": "int", "default": -1}], "example": "create_scene(index=-1)"},
        # Samples
        "analyze_library": {"description": "Analiza todos los samples en la libreria de reggaeton", "category": "Samples", "params": [{"name": "force_reanalyze", "type": "bool", "default": False}], "example": "analyze_library(force_reanalyze=False)"},
        "get_library_stats": {"description": "Obtiene estadisticas de la libreria analizada", "category": "Samples", "params": [], "example": "get_library_stats()"},
        "get_similar_samples": {"description": "Encuentra samples similares usando embeddings", "category": "Samples", "params": [{"name": "sample_path", "type": "str"}, {"name": "top_n", "type": "int", "default": 10}], "example": "get_similar_samples(sample_path='...', top_n=10)"},
        "find_samples_like_audio": {"description": "Encuentra samples similares a un audio externo", "category": "Samples", "params": [{"name": "audio_path", "type": "str"}, {"name": "top_n", "type": "int", "default": 20}, {"name": "role", "type": "str", "optional": True}], "example": "find_samples_like_audio(audio_path='...', top_n=20)"},
        "get_user_sound_profile": {"description": "Obtiene el perfil de sonido del usuario", "category": "Samples", "params": [], "example": "get_user_sound_profile()"},
        "get_recommended_samples": {"description": "Obtiene samples recomendados para un rol", "category": "Samples", "params": [{"name": "role", "type": "str", "optional": True}, {"name": "count", "type": "int", "default": 5}], "example": "get_recommended_samples(role='kick', count=5)"},
        "compare_two_samples": {"description": "Compara dos samples y devuelve similitud", "category": "Samples", "params": [{"name": "path1", "type": "str"}, {"name": "path2", "type": "str"}], "example": "compare_two_samples(path1='...', path2='...')"},
        "browse_library": {"description": "Navega la libreria con filtros", "category": "Samples", "params": [{"name": "pack", "type": "str", "optional": True}, {"name": "role", "type": "str", "optional": True}, {"name": "bpm_min", "type": "float", "default": 0}, {"name": "bpm_max", "type": "float", "default": 0}, {"name": "key", "type": "str", "optional": True}], "example": "browse_library(role='kick', bpm_min=90, bpm_max=100)"},
        # Mixing
        "create_bus_track": {"description": "Crea un grupo (bus) para mezcla", "category": "Mixing", "params": [{"name": "bus_type", "type": "str", "default": "Group"}], "example": "create_bus_track(bus_type='Drums')"},
        "route_track_to_bus": {"description": "Rutea una pista a un bus/grupo", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "bus_name", "type": "str"}], "example": "route_track_to_bus(track_index=0, bus_name='Drums')"},
        "create_return_track": {"description": "Crea una pista de retorno con efecto", "category": "Mixing", "params": [{"name": "effect_type", "type": "str", "default": "Reverb"}], "example": "create_return_track(effect_type='Reverb')"},
        "set_track_send": {"description": "Configura envio a pista de retorno (0.0-1.0)", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "return_index", "type": "int"}, {"name": "amount", "type": "float", "range": "0.0-1.0"}], "example": "set_track_send(track_index=0, return_index=0, amount=0.3)"},
        "insert_device": {"description": "Inserta un dispositivo/plugin en una pista", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "device_name", "type": "str"}], "example": "insert_device(track_index=0, device_name='EQ Eight')"},
        "configure_eq": {"description": "Configura EQ Eight en una pista", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "preset", "type": "str", "default": "default"}], "example": "configure_eq(track_index=0, preset='kick_boost')"},
        "configure_compressor": {"description": "Configura compresor en una pista", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "preset", "type": "str", "default": "default"}, {"name": "threshold", "type": "float", "default": -20.0}, {"name": "ratio", "type": "float", "default": 4.0}], "example": "configure_compressor(track_index=1, threshold=-20.0, ratio=4.0)"},
        "setup_sidechain": {"description": "Configura compresion sidechain", "category": "Mixing", "params": [{"name": "source_track", "type": "int"}, {"name": "target_track", "type": "int"}, {"name": "amount", "type": "float", "range": "0.0-1.0"}], "example": "setup_sidechain(source_track=0, target_track=1, amount=0.5)"},
        "auto_gain_staging": {"description": "Ajusta automaticamente niveles de ganancia", "category": "Mixing", "params": [], "example": "auto_gain_staging()"},
        "apply_master_chain": {"description": "Aplica cadena de mastering al master", "category": "Mixing", "params": [{"name": "preset", "type": "str", "default": "standard"}], "example": "apply_master_chain(preset='reggaeton_streaming')"},
        # Arrangement
        "create_arrangement_audio_pattern": {"description": "Crea clips de audio en Arrangement View", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "file_path", "type": "str"}, {"name": "positions", "type": "list", "default": [0]}, {"name": "name", "type": "str", "optional": True}], "example": "create_arrangement_audio_pattern(track_index=0, file_path='...', positions=[0, 4, 8])"},
        "load_sample_to_clip": {"description": "Carga sample en clip de Session View", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "sample_path", "type": "str"}], "example": "load_sample_to_clip(track_index=0, clip_index=0, sample_path='...')"},
        "duplicate_clip": {"description": "Duplica un clip a otro slot de Session View", "category": "Arrangement", "params": [{"name": "source_track", "type": "int"}, {"name": "source_clip", "type": "int"}, {"name": "target_track", "type": "int"}, {"name": "target_clip", "type": "int"}], "example": "duplicate_clip(source_track=0, source_clip=0, target_track=0, target_clip=1)"},
        "load_sample_to_drum_rack": {"description": "Carga sample en pad de Drum Rack", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "sample_path", "type": "str"}, {"name": "pad_note", "type": "int", "default": 36}], "example": "load_sample_to_drum_rack(track_index=0, sample_path='...', pad_note=36)"},
        "set_warp_markers": {"description": "Configura marcadores de warp", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "markers", "type": "list"}], "example": "set_warp_markers(track_index=0, clip_index=0, markers=[...])"},
        "reverse_clip": {"description": "Invierte un clip", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}], "example": "reverse_clip(track_index=0, clip_index=0)"},
        "pitch_shift_clip": {"description": "Cambia tono de clip (-24 a +24 semitonos)", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "semitones", "type": "float", "range": "-24 a +24"}], "example": "pitch_shift_clip(track_index=0, clip_index=0, semitones=-2)"},
        "time_stretch_clip": {"description": "Estira tiempo de clip (0.25x a 4.0x)", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "factor", "type": "float", "range": "0.25-4.0"}], "example": "time_stretch_clip(track_index=0, clip_index=0, factor=1.5)"},
        "slice_clip": {"description": "Divide clip en segmentos (2-64)", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "num_slices", "type": "int", "default": 8}], "example": "slice_clip(track_index=0, clip_index=0, num_slices=8)"},
        # Production
        "generate_track": {"description": "Genera una pista con IA", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "style", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}, {"name": "key", "type": "str", "optional": True}, {"name": "structure", "type": "str", "default": "standard"}], "example": "generate_track(genre='reggaeton', bpm=95, key='Am')"},
        "generate_song": {"description": "Genera cancion completa con IA", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "style", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}, {"name": "key", "type": "str", "optional": True}, {"name": "structure", "type": "str", "default": "standard"}], "example": "generate_song(genre='reggaeton', bpm=95, key='Am')"},
        "select_samples_for_genre": {"description": "Selecciona samples para un genero", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "key", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}], "example": "select_samples_for_genre(genre='reggaeton', key='Am', bpm=95)"},
        "generate_complete_reggaeton": {"description": "Genera proyecto completo de reggaeton", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "structure", "type": "str", "default": "verse-chorus"}, {"name": "use_samples", "type": "bool", "default": True}], "example": "generate_complete_reggaeton(bpm=95, key='Am', style='classic')"},
        "generate_from_reference": {"description": "Genera track desde audio de referencia", "category": "Production", "params": [{"name": "reference_audio_path", "type": "str"}], "example": "generate_from_reference(reference_audio_path='...')"},
        "produce_reggaeton": {"description": "Pipeline completo de produccion reggaeton", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "structure", "type": "str", "default": "verse-chorus"}], "example": "produce_reggaeton(bpm=95, key='Am', style='classic', structure='verse-chorus')"},
        "produce_from_reference": {"description": "Genera produccion desde referencia", "category": "Production", "params": [{"name": "audio_path", "type": "str"}], "example": "produce_from_reference(audio_path='...')"},
        "produce_arrangement": {"description": "Genera produccion en Arrangement View", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}], "example": "produce_arrangement(bpm=95, key='Am', style='classic')"},
        "complete_production": {"description": "Pipeline completo con renderizado", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "output_dir", "type": "str", "optional": True}], "example": "complete_production(bpm=95, key='Am', style='classic')"},
        "batch_produce": {"description": "Produce multiples canciones en lote", "category": "Production", "params": [{"name": "count", "type": "int", "default": 3}, {"name": "style", "type": "str", "default": "classic"}, {"name": "bpm_range", "type": "str", "default": "90-100"}], "example": "batch_produce(count=3, style='classic', bpm_range='90-100')"},
        "generate_midi_clip": {"description": "Crea clip MIDI con notas especificas", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "notes", "type": "list", "optional": True}], "example": "generate_midi_clip(track_index=0, clip_index=0, notes=[...])"},
        "generate_dembow_clip": {"description": "Genera clip MIDI con patron dembow", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "variation", "type": "str", "default": "standard"}], "example": "generate_dembow_clip(track_index=0, clip_index=0, bars=4, variation='standard')"},
        "generate_bass_clip": {"description": "Genera clip MIDI de bajo reggaeton", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "root_notes", "type": "list", "optional": True}, {"name": "style", "type": "str", "default": "standard"}], "example": "generate_bass_clip(track_index=1, clip_index=0, bars=4, style='standard')"},
        "generate_chords_clip": {"description": "Genera clip MIDI de acordes", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "progression", "type": "str", "default": "i-v-vi-iv"}, {"name": "key", "type": "str", "default": "Am"}], "example": "generate_chords_clip(track_index=2, clip_index=0, bars=4, progression='i-v-vi-iv', key='Am')"},
        "generate_melody_clip": {"description": "Genera clip MIDI de melodia", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "scale", "type": "str", "default": "minor"}, {"name": "density", "type": "str", "default": "medium"}], "example": "generate_melody_clip(track_index=3, clip_index=0, bars=4, scale='minor', density='medium')"},
        "load_samples_for_genre": {"description": "Selecciona y carga samples para genero", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "key", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}], "example": "load_samples_for_genre(genre='reggaeton', key='Am', bpm=95)"},
        "create_drum_kit": {"description": "Crea drum kit en Drum Rack", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "kick_path", "type": "str", "optional": True}, {"name": "snare_path", "type": "str", "optional": True}, {"name": "hat_path", "type": "str", "optional": True}, {"name": "clap_path", "type": "str", "optional": True}], "example": "create_drum_kit(track_index=0, kick_path='...', snare_path='...', hat_path='...', clap_path='...')"},
        "build_track_from_samples": {"description": "Construye pista completa desde samples", "category": "Production", "params": [{"name": "track_type", "type": "str", "default": "drums"}, {"name": "sample_role", "type": "str", "default": "drums"}], "example": "build_track_from_samples(track_type='drums', sample_role='drums')"},
        "generate_full_song": {"description": "Genera cancion completa con drums/bass/chords/melody", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "structure", "type": "str", "default": "standard"}], "example": "generate_full_song(bpm=95, key='Am', style='classic')"},
        "generate_track_from_config": {"description": "Genera pista desde JSON config", "category": "Production", "params": [{"name": "track_config_json", "type": "str"}], "example": "generate_track_from_config(track_config_json='{\"type\":\"drums\",\"pattern\":\"dembow\",\"bars\":8}')"},
        "generate_section": {"description": "Genera seccion de cancion desde JSON", "category": "Production", "params": [{"name": "section_config_json", "type": "str"}, {"name": "start_bar", "type": "int", "default": 0}], "example": "generate_section(section_config_json='{\"type\":\"verse\",\"bars\":16,\"elements\":[\"drums\",\"bass\"]}', start_bar=0)"},
        "apply_human_feel": {"description": "Humaniza pista MIDI (0.0-1.0)", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "intensity", "type": "float", "range": "0.0-1.0"}], "example": "apply_human_feel(track_index=0, intensity=0.3)"},
        "add_percussion_fills": {"description": "Aniade fills de percusion", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "positions", "type": "list", "default": [7, 15, 23, 31]}], "example": "add_percussion_fills(track_index=0, positions=[7, 15, 23, 31])"},
        # Musical Intelligence
        "analyze_project_key": {"description": "Detecta tonalidad del proyecto", "category": "Musical Intelligence", "params": [], "example": "analyze_project_key()"},
        "harmonize_track": {"description": "Armoniza pista con progresion", "category": "Musical Intelligence", "params": [{"name": "track_index", "type": "int"}, {"name": "progression", "type": "str", "default": "I-V-vi-IV"}], "example": "harmonize_track(track_index=2, progression='I-V-vi-IV')"},
        "generate_counter_melody": {"description": "Genera contra-melodia", "category": "Musical Intelligence", "params": [{"name": "main_melody_track", "type": "int"}], "example": "generate_counter_melody(main_melody_track=3)"},
        "detect_energy_curve": {"description": "Analiza curva de energia por seccion", "category": "Musical Intelligence", "params": [], "example": "detect_energy_curve()"},
        "balance_sections": {"description": "Ajusta energia entre secciones", "category": "Musical Intelligence", "params": [], "example": "balance_sections()"},
        "variate_loop": {"description": "Crea variaciones de loop (0.0-1.0)", "category": "Musical Intelligence", "params": [{"name": "track_index", "type": "int"}, {"name": "intensity", "type": "float", "range": "0.0-1.0"}], "example": "variate_loop(track_index=0, intensity=0.5)"},
        "add_call_and_response": {"description": "Genera respuesta musical a frase", "category": "Musical Intelligence", "params": [{"name": "phrase_track", "type": "int"}, {"name": "response_length", "type": "int", "default": 2}], "example": "add_call_and_response(phrase_track=3, response_length=2)"},
        "generate_breakdown": {"description": "Genera seccion breakdown", "category": "Musical Intelligence", "params": [{"name": "start_bar", "type": "int"}, {"name": "duration", "type": "int", "default": 8}], "example": "generate_breakdown(start_bar=32, duration=8)"},
        "generate_drop_variation": {"description": "Genera variacion de drop", "category": "Musical Intelligence", "params": [{"name": "original_drop_bar", "type": "int"}, {"name": "variation_type", "type": "str", "default": "intense"}], "example": "generate_drop_variation(original_drop_bar=16, variation_type='intense')"},
        "create_outro": {"description": "Crea outro con fade out", "category": "Musical Intelligence", "params": [{"name": "fade_duration", "type": "int", "default": 8}], "example": "create_outro(fade_duration=8)"},
        # Workflow
        "export_project": {"description": "Exporta proyecto a archivo de audio", "category": "Workflow", "params": [{"name": "path", "type": "str"}, {"name": "format", "type": "str", "default": "wav"}], "example": "export_project(path='C:\\\\output.wav', format='wav')"},
        "get_project_summary": {"description": "Obtiene resumen del proyecto", "category": "Workflow", "params": [], "example": "get_project_summary()"},
        "suggest_improvements": {"description": "Sugerencias IA para mejorar proyecto", "category": "Workflow", "params": [], "example": "suggest_improvements()"},
        "validate_project": {"description": "Valida consistencia del proyecto", "category": "Workflow", "params": [], "example": "validate_project()"},
        "humanize_track": {"description": "Humaniza pista MIDI (0.0-1.0)", "category": "Workflow", "params": [{"name": "track_index", "type": "int"}, {"name": "intensity", "type": "float", "range": "0.0-1.0"}], "example": "humanize_track(track_index=0, intensity=0.5)"},
        "load_preset": {"description": "Carga preset en proyecto", "category": "Workflow", "params": [{"name": "preset_name", "type": "str"}], "example": "load_preset(preset_name='reggaeton_basic')"},
        "save_as_preset": {"description": "Guarda proyecto como preset", "category": "Workflow", "params": [{"name": "name", "type": "str"}, {"name": "description", "type": "str", "optional": True}], "example": "save_as_preset(name='mi_preset', description='Mi template de reggaeton')"},
        "list_presets": {"description": "Lista presets disponibles", "category": "Workflow", "params": [], "example": "list_presets()"},
        "create_custom_preset": {"description": "Crea preset personalizado", "category": "Workflow", "params": [{"name": "name", "type": "str"}, {"name": "description", "type": "str", "optional": True}], "example": "create_custom_preset(name='nuevo_preset', description='...')"},
        "render_stems": {"description": "Renderiza stems individuales", "category": "Workflow", "params": [{"name": "output_dir", "type": "str"}], "example": "render_stems(output_dir='C:\\\\stems\\\\')"},
        "render_full_mix": {"description": "Renderiza mix completo masterizado", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "render_full_mix(output_path='C:\\\\mix_final.wav')"},
        "render_instrumental": {"description": "Renderiza version instrumental", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "render_instrumental(output_path='C:\\\\instrumental.wav')"},
        "full_quality_check": {"description": "Verificacion de calidad completa", "category": "Workflow", "params": [], "example": "full_quality_check()"},
        "fix_quality_issues": {"description": "Arregla problemas de calidad", "category": "Workflow", "params": [{"name": "issues", "type": "list", "optional": True}], "example": "fix_quality_issues(issues=[])"},
        "duplicate_project": {"description": "Duplica proyecto con nuevo nombre", "category": "Workflow", "params": [{"name": "new_name", "type": "str"}], "example": "duplicate_project(new_name='mi_track_v2')"},
        "create_radio_edit": {"description": "Crea version radio edit", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "create_radio_edit(output_path='C:\\\\radio_edit.wav')"},
        "create_dj_edit": {"description": "Crea version DJ edit", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "create_dj_edit(output_path='C:\\\\dj_edit.wav')"},
        "get_production_report": {"description": "Genera reporte completo de produccion", "category": "Workflow", "params": [], "example": "get_production_report()"},
        # Diagnostics
        "get_memory_usage": {"description": "Uso de memoria del sistema", "category": "Diagnostics", "params": [], "example": "get_memory_usage()"},
        "get_progress_report": {"description": "Reporte de progreso del proyecto", "category": "Diagnostics", "params": [], "example": "get_progress_report()"},
        # System
        "ping": {"description": "Ping simple para verificar conectividad MCP", "category": "System", "params": [], "example": "ping()"},
        "help": {"description": "Lista todas las tools o ayuda detallada de una tool", "category": "System", "params": [{"name": "tool_name", "type": "str", "optional": True}], "example": "help() o help(tool_name='produce_reggaeton')"},
        "get_workflow_status": {"description": "Estado actual del workflow de produccion", "category": "System", "params": [], "example": "get_workflow_status()"},
        "undo": {"description": "Deshace ultima accion", "category": "System", "params": [], "example": "undo()"},
        "redo": {"description": "Rehace ultima accion deshecha", "category": "System", "params": [], "example": "redo()"},
        "save_checkpoint": {"description": "Guarda checkpoint del proyecto", "category": "System", "params": [{"name": "name", "type": "str", "default": "auto"}], "example": "save_checkpoint(name='antes_mejora')"},
        "set_multiple_progressions": {"description": "Configura progresiones para multiples secciones", "category": "System", "params": [{"name": "progressions_config", "type": "list"}], "example": "set_multiple_progressions(progressions_config=[...])"},
        "modulate_key": {"description": "Modula a nueva tonalidad en seccion", "category": "System", "params": [{"name": "section_index", "type": "int"}, {"name": "new_key", "type": "str"}], "example": "modulate_key(section_index=2, new_key='Dm')"},
        "enable_parallel_processing": {"description": "Activa/desactiva procesamiento paralelo", "category": "System", "params": [{"name": "enabled", "type": "bool", "default": True}], "example": "enable_parallel_processing(enabled=True)"},
    }
    # If a tool name was supplied, return detailed help for it.
    if tool_name:
        tool_name_lower = tool_name.lower()
        # Try an exact (case-insensitive) name match first.
        matches = {k: v for k, v in tools_db.items() if k.lower() == tool_name_lower}
        if not matches:
            # Fuzzy match: fall back to substring containment, which may
            # yield several candidate tools.
            matches = {k: v for k, v in tools_db.items() if tool_name_lower in k.lower()}
        if not matches:
            return _err(f"Tool '{tool_name}' not found. Use help() without arguments to see all tools.")
        results = []
        for name, info in matches.items():
            # Render the parameter spec as a compact "name (optional): type" list.
            params_str = ", ".join(
                p["name"] + (" (optional)" if p.get("optional") else "") + ": " + p["type"]
                for p in info.get("params", [])
            )
            results.append({
                "name": name,
                "description": info["description"],
                "category": info["category"],
                "parameters": params_str if params_str else "None",
                "example": info["example"],
            })
        # A single match returns the record itself; multiple fuzzy matches
        # return the whole list.
        return _ok({"tool_help": results[0] if len(results) == 1 else results})
    # No tool_name given: list every tool grouped by category.
    by_category = {}
    for name, info in tools_db.items():
        cat = info["category"]
        if cat not in by_category:
            by_category[cat] = []
        by_category[cat].append({"name": name, "description": info["description"]})
    return _ok({
        "total_tools": len(tools_db),
        "categories": sorted(by_category.keys()),
        "tools_by_category": by_category,
        "usage": "Use help(tool_name='toolname') for detailed help on a specific tool.",
    })
@mcp.tool()
def get_workflow_status(ctx: Context) -> str:
    """Report the current production workflow state with actionable next steps (T100).

    Returns:
        - Current project state (tracks, clips, scenes)
        - Mixing configuration
        - Arrangement content
        - Recommended next steps
    """
    try:
        # --- Session / transport snapshot ---
        session_data = {}
        sess = _send_to_ableton("get_session_info", timeout=TIMEOUTS["get_session_info"])
        if sess.get("status") == "success":
            info = sess.get("result", {})
            session_data = {
                "tempo": info.get("tempo"),
                "num_tracks": info.get("num_tracks", 0),
                "num_scenes": info.get("num_scenes", 0),
                "is_playing": info.get("is_playing", False),
                "current_song_time": info.get("current_song_time", 0),
            }

        # --- Per-track detail ---
        tracks_data = {}
        has_mixing_config = False
        has_arrangement_content = False
        tracks_resp = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
        if tracks_resp.get("status") == "success":
            result = _ableton_result(tracks_resp)
            tracks = result.get("tracks", [])
            tracks_data = {
                "count": len(tracks),
                "midi_tracks": sum(1 for t in tracks if t.get("type") == "midi"),
                "audio_tracks": sum(1 for t in tracks if t.get("type") == "audio"),
                "track_names": [t.get("name", "") for t in tracks],
                "muted": [t.get("name", "") for t in tracks if t.get("mute")],
                "soloed": [t.get("name", "") for t in tracks if t.get("solo")],
            }
            # Mixing counts as "configured" when return tracks or devices exist.
            return_tracks = result.get("return_tracks", [])
            has_mixing_config = bool(return_tracks) or any(t.get("devices") for t in tracks)
            # Any clip already recorded into the Arrangement timeline?
            has_arrangement_content = any(t.get("arrangement_clips", 0) > 0 for t in tracks)

        # --- Recommend the next actions based on what exists so far ---
        next_steps = []
        num_tracks = session_data.get("num_tracks", 0)
        if num_tracks == 0:
            next_steps.append("1. Crear pistas: create_midi_track() o create_audio_track()")
            next_steps.append("2. Generar contenido: produce_reggaeton(bpm=95, key='Am', style='classic')")
        elif not has_arrangement_content:
            next_steps.append("1. Generar clips en pistas: generate_dembow_clip(), generate_bass_clip(), etc.")
            next_steps.append("2. O usar pipeline automatico: produce_reggaeton(bpm=95, key='Am')")
            next_steps.append("3. O construir arrangement: produce_arrangement(bpm=95, key='Am')")
        if num_tracks > 0 and not has_mixing_config:
            next_steps.append("Configurar mezcla: create_bus_track(), configure_eq(), configure_compressor(), setup_sidechain()")
        if num_tracks > 0 and has_arrangement_content:
            next_steps.append("Verificar calidad: full_quality_check()")
            next_steps.append("Humanizar: apply_human_feel(track_index=0, intensity=0.3)")
            next_steps.append("Exportar: render_stems(output_dir='...'), render_full_mix(output_path='...')")
        if not next_steps:
            next_steps.append("Ejecutar health_check() para verificar estado del sistema")
            next_steps.append("Usar produce_reggaeton() para iniciar produccion rapida")

        return _ok({
            "project_status": {
                "tempo": session_data.get("tempo"),
                "tracks": tracks_data,
                "num_scenes": session_data.get("num_scenes", 0),
                "is_playing": session_data.get("is_playing", False),
            },
            "mixing_configured": has_mixing_config,
            "arrangement_has_content": has_arrangement_content,
            "next_steps": next_steps,
        })
    except Exception as e:
        return _err(f"Error getting workflow status: {str(e)}")
@mcp.tool()
def undo(ctx: Context) -> str:
    """Undo the most recent action (T098)."""
    # Thin proxy: the Remote Script performs the actual undo in Live.
    limit = TIMEOUTS["undo"]
    return _proxy_ableton_command("undo", timeout=limit)
@mcp.tool()
def redo(ctx: Context) -> str:
    """Redo the most recently undone action (T098)."""
    # Thin proxy: the Remote Script performs the actual redo in Live.
    limit = TIMEOUTS["redo"]
    return _proxy_ableton_command("redo", timeout=limit)
@mcp.tool()
def save_checkpoint(ctx: Context, name: str = "auto") -> str:
    """Save a checkpoint of the current project (T099).

    Args:
        name: Checkpoint label
    """
    # Same payload is used both as parameters and as fallback defaults.
    payload = {"name": name}
    return _proxy_ableton_command(
        "save_checkpoint",
        payload,
        timeout=TIMEOUTS["save_checkpoint"],
        defaults=dict(payload),
    )
@mcp.tool()
def get_production_report(ctx: Context) -> str:
    """Generate a complete production report for the current project (T100)."""
    try:
        from engines.workflow_engine import WorkflowEngine

        report = WorkflowEngine().get_production_report()
        # Normalize the engine's dict into a stable response shape.
        return _ok({
            "project_name": report.get("project_name", "Untitled"),
            "duration": report.get("duration", "0:00"),
            "total_tracks": report.get("total_tracks", 0),
            "midi_clips": report.get("midi_clips", 0),
            "audio_clips": report.get("audio_clips", 0),
            "devices_used": report.get("devices", []),
            "samples_used": report.get("samples", []),
            "production_time": report.get("production_time", "unknown"),
            "export_history": report.get("exports", []),
            "quality_score": report.get("quality_score", 0),
        })
    except Exception as e:
        return _err(f"Error getting production report: {str(e)}")
# ==================================================================
# EXTRAS (T086-T095)
# ==================================================================
@mcp.tool()
def set_multiple_progressions(ctx: Context, progressions_config: list) -> str:
    """Configure chord progressions for multiple song sections (T086).

    Args:
        progressions_config: List of dicts like {"section": "intro", "progression": "I-V-vi-IV"}
    """
    try:
        from engines.musical_intelligence import MusicalIntelligenceEngine

        outcome = MusicalIntelligenceEngine().set_multiple_progressions(progressions_config)
        return _ok({
            "sections_configured": outcome.get("sections", []),
            "progressions_applied": outcome.get("progressions", []),
            "chords_generated": outcome.get("total_chords", 0),
        })
    except Exception as e:
        return _err(f"Error setting progressions: {str(e)}")
@mcp.tool()
def modulate_key(ctx: Context, section_index: int, new_key: str) -> str:
    """Modulate to a new key at a specific section (T087).

    Args:
        section_index: Index of the section
        new_key: Target key (e.g. "Dm", "F#m", "C")
    """
    try:
        from engines.musical_intelligence import MusicalIntelligenceEngine

        outcome = MusicalIntelligenceEngine().modulate_key(section_index, new_key)
        return _ok({
            "section_index": section_index,
            "original_key": outcome.get("original_key"),
            "new_key": new_key,
            "modulation_type": outcome.get("modulation_type", "direct"),
            "tracks_affected": outcome.get("tracks_affected", []),
        })
    except Exception as e:
        return _err(f"Error modulating key: {str(e)}")
@mcp.tool()
def enable_parallel_processing(ctx: Context, enabled: bool = True) -> str:
    """Enable or disable parallel processing for heavy operations (T092).

    Args:
        enabled: True to enable, False to disable
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        outcome = WorkflowEngine().set_parallel_processing(enabled)
        return _ok({
            "parallel_processing": enabled,
            "max_workers": outcome.get("max_workers", 4),
            "affected_operations": outcome.get("operations", ["render", "analyze", "generate"]),
        })
    except Exception as e:
        return _err(f"Error setting parallel processing: {str(e)}")
@mcp.tool()
def get_memory_usage(ctx: Context) -> str:
    """Report memory usage of this process and the system (T094).

    Returns process RSS and percent, system totals/availability, and the number
    of running processes whose name contains "ableton".
    """
    try:
        import psutil

        process = psutil.Process()
        system_memory = psutil.virtual_memory()
        # Count Live processes defensively. process_iter(attrs=[...]) pre-fetches
        # the name and skips entries that raise NoSuchProcess/AccessDenied, so a
        # short-lived or protected process can no longer abort the whole scan
        # (the previous per-process p.name() call could raise mid-iteration).
        live_count = 0
        for p in psutil.process_iter(attrs=["name"]):
            pname = p.info.get("name") or ""
            if "ableton" in pname.lower():
                live_count += 1
        return _ok({
            "process_memory_mb": process.memory_info().rss / 1024 / 1024,
            "process_memory_percent": process.memory_percent(),
            "system_total_mb": system_memory.total / 1024 / 1024,
            "system_available_mb": system_memory.available / 1024 / 1024,
            "system_percent_used": system_memory.percent,
            "live_processes": live_count,
        })
    except ImportError:
        return _err("psutil not available. Install with: pip install psutil")
    except Exception as e:
        return _err(f"Error getting memory usage: {str(e)}")
@mcp.tool()
def get_progress_report(ctx: Context) -> str:
    """Detailed progress report for the current project (T095)."""
    try:
        from engines.workflow_engine import WorkflowEngine

        outcome = WorkflowEngine().get_progress_report()
        return _ok({
            "project_completion": outcome.get("completion", 0),
            "phases_completed": outcome.get("phases_completed", []),
            "current_phase": outcome.get("current_phase", "unknown"),
            "tasks_done": outcome.get("tasks_done", 0),
            "tasks_total": outcome.get("tasks_total", 0),
            "time_invested": outcome.get("time_invested", "0h 0m"),
            "milestones": outcome.get("milestones", []),
        })
    except Exception as e:
        return _err(f"Error getting progress report: {str(e)}")
# ==================================================================
# PLAYBACK, ARRANGEMENT & LIBRARY TOOLS (core fixes)
# ==================================================================
@mcp.tool()
def fire_all_clips(ctx: Context, scene_index: int = 0, start_playback: bool = True) -> str:
    """Fire every clip in a Session View scene so the generated material is audible.

    Call this right after any produce_* / generate_* command: clips created in
    Session View stay silent until fired.

    Args:
        scene_index: Scene row to fire (default 0, the first scene)
        start_playback: Also start the transport (default True)
    """
    payload = {"scene_index": scene_index, "start_playback": start_playback}
    return _proxy_ableton_command("fire_all_clips", payload, timeout=15.0)
@mcp.tool()
def record_to_arrangement(ctx: Context, duration_bars: int = 8) -> str:
    """Record Session View clips into Arrangement View for viewing and editing.

    Enables arrangement overdub, fires scene 0, records for `duration_bars`
    bars, then stops and switches Live to Arrangement View.

    Args:
        duration_bars: Number of bars to record (default 8)
    """
    # Scale the timeout with the recording length, plus generous headroom.
    deadline = duration_bars * 4.0 + 30.0
    return _proxy_ableton_command(
        "record_to_arrangement",
        {"duration_bars": duration_bars},
        timeout=deadline,
    )
@mcp.tool()
def scan_library(ctx: Context, subfolder: str = "", extensions: list = None) -> str:
    """Scan the libreria/ sample library and list available samples by folder.

    Use this to discover samples before loading them; the returned file paths
    can be passed to load_sample_direct.

    Args:
        subfolder: Sub-folder to scan, e.g. "reggaeton/kick" (default = all)
        extensions: Extensions to include, e.g. [".wav", ".mp3"] (default all audio)
    """
    payload = {"subfolder": subfolder}
    # Only forward the filter when the caller supplied a non-empty list.
    if extensions:
        payload["extensions"] = extensions
    return _proxy_ableton_command("scan_library", payload, timeout=20.0)
@mcp.tool()
def load_sample_direct(ctx: Context, track_index: int, file_path: str,
                       slot_index: int = 0, warp: bool = True,
                       auto_fire: bool = False) -> str:
    """Load a sample from libreria/ onto a track by absolute file path.

    Bypasses the Live browser entirely, which makes it the most reliable way
    to use the sample library. Accepts WAV, AIF, or MP3 files.

    Args:
        track_index: Ableton track index (0-based)
        file_path: Absolute path OR path relative to the libreria/ root
        slot_index: Clip slot index (default 0)
        warp: Enable warping/tempo-sync (default True)
        auto_fire: Fire the clip right after loading (default False)
    """
    payload = {
        "track_index": track_index,
        "file_path": file_path,
        "slot_index": slot_index,
        "warp": warp,
        "auto_fire": auto_fire,
    }
    return _proxy_ableton_command("load_sample_direct", payload, timeout=20.0)
@mcp.tool()
def produce_with_library(ctx: Context, genre: str = "reggaeton", tempo: int = 95,
                         key: str = "Am", bars: int = 16,
                         auto_play: bool = True,
                         record_arrangement: bool = True) -> str:
    """One-shot Session View production using the real 511-sample library.

    DEPRECATED: prefer build_arrangement_timeline() for direct Arrangement View
    creation. This tool works in Session View (Live's clip-launching paradigm);
    for timeline-based composition use build_arrangement_timeline() instead.

    Pipeline:
        1. Sets project tempo
        2. Loads real drum samples (kick, snare, clap, hihat) from libreria/
        3. Loads bass samples from libreria/
        4. Generates a MIDI dembow drum pattern
        5. Generates a MIDI bass line
        6. Generates a chord progression
        7. Records to Arrangement View (if record_arrangement=True)
        8. Fires all clips so the result is audible immediately

    Migration guide:
        - OLD (Session View): produce_with_library() -> clips in Session View
        - NEW (Arrangement): build_arrangement_timeline() -> direct timeline placement

    Args:
        genre: Genre for sample selection, e.g. "reggaeton" (default "reggaeton")
        tempo: BPM (default 95)
        key: Musical key, e.g. "Am", "Cm", "Gm" (default "Am")
        bars: Pattern length in bars (default 16)
        auto_play: Start playback right after building (default True)
        record_arrangement: Also record into Arrangement View (default True)
    """
    payload = {
        "genre": genre,
        "tempo": tempo,
        "key": key,
        "bars": bars,
        "auto_play": auto_play,
        "record_arrangement": record_arrangement,
    }
    return _proxy_ableton_command("produce_with_library", payload, timeout=120.0)
@mcp.tool()
def build_song(ctx: Context,
               genre: str = "reggaeton",
               tempo: int = 95,
               key: str = "Am",
               style: str = "standard",
               auto_record: bool = True) -> str:
    """Build a complete, intelligent song arrangement in Arrangement View.

    *** USE THIS TOOL TO CREATE MUSIC — it's the definitive production command. ***

    Automatically:
        - Scans the libreria/ sample library (511 samples)
        - Creates Kick, Snare, HiHat, Perc, Bass audio tracks with REAL samples
        - Creates Dembow, Bass MIDI, Chords, Melody MIDI tracks with generated patterns
        - Builds 5 sections (Intro/Verse/Chorus/Bridge/Outro), each with distinct
          clip variations (sparse intro, full chorus with melody, etc.)
        - Records all sections into Arrangement View section by section
        - Switches Live to Arrangement View when done

    Recording time: 4+8+8+4+4 = 28 bars x (60/tempo x 4) seconds per bar;
    at 95 BPM roughly 70 seconds. Clips appear in the Arrangement as it records.

    Args:
        genre: Library folder for samples — "reggaeton" (default)
        tempo: Song BPM (default 95)
        key: Musical key, e.g. "Am", "Cm", "Gm" (default "Am")
        style: Pattern style — "standard", "minimal", or "trap" (default "standard")
        auto_record: Record to Arrangement View automatically (default True)
    """
    payload = {
        "genre": genre,
        "tempo": tempo,
        "key": key,
        "style": style,
        "auto_record": auto_record,
    }
    # 5 minutes: enough headroom for a 28-bar recording at any tempo.
    return _proxy_ableton_command("build_song", payload, timeout=300.0)
@mcp.tool()
def produce_13_scenes(ctx: Context,
                      genre: str = "reggaeton",
                      tempo: int = 95,
                      key: str = "Am",
                      auto_play: bool = True,
                      record_arrangement: bool = True) -> str:
    """Sprint 7: produce a complete track with 13 scenes and 100+ unique samples.

    Driven by the advanced sample-rotation system:
        - Energy-based sample filtering (soft/medium/hard)
        - Usage tracking that avoids consecutive repetition
        - 658 SentimientoLatino2025 samples (26 kicks, 26 snares, 34 drumloops,
          34 percs, 24 fx, 84 oneshots)
        - 13 full scenes with specific flags (riser, impact, ambience, ...)

    Scene structure (bars, energy):
        1. Intro (4, 0.20) pad + ambience, no drums
        2. Verse A (8, 0.50) full drums + bass
        3. Verse B (8, 0.60) drums + bass + lead
        4. Pre-Chorus (4, 0.75) riser + anticipation
        5. Chorus A (8, 0.95) full arrangement + impact
        6. Chorus B (8, 0.90) alternative progression
        7. Verse C (8, 0.55) variation, sparse drums
        8. Chorus C (8, 0.95) rising intensity
        9. Bridge (4, 0.40) dark, modal borrowing
        10. Build Up (4, 0.80) crescendo + riser
        11. Final Chorus (8, 1.00) all layers, maximum impact
        12. Outro (4, 0.30) fade-out elements
        13. End (2, 0.00) silence

    Args:
        genre: Genre for sample selection (default "reggaeton")
        tempo: BPM (default 95)
        key: Musical key, e.g. "Am", "Cm", "Gm" (default "Am")
        auto_play: Start playback right after building (default True)
        record_arrangement: Also record into Arrangement View (default True)
    """
    payload = {
        "genre": genre,
        "tempo": tempo,
        "key": key,
        "auto_play": auto_play,
        "record_arrangement": record_arrangement,
    }
    # 5 minutes: recording all 13 scenes takes a while.
    return _proxy_ableton_command("produce_13_scenes", payload, timeout=300.0)
@mcp.tool()
def get_recording_status(ctx: Context) -> str:
    """Check the progress of an in-progress arrangement recording.

    Poll this while build_song or record_to_arrangement is running; reports the
    current section name, phase, and seconds remaining in the section.
    """
    return _proxy_ableton_command("get_recording_status", {}, timeout=5.0)
@mcp.tool()
def stop_recording(ctx: Context) -> str:
    """Abort any in-progress arrangement recording immediately.

    Disables overdub, stops playback, and switches Live to Arrangement View.
    Use this to cancel a running build_song recording.
    """
    # Routed to the Remote Script's stop_all_playback handler.
    return _proxy_ableton_command("stop_all_playback", {}, timeout=10.0)
# ==================================================================
# ARRANGEMENT-FIRST TOOLS (Direct timeline composition)
# ==================================================================
# These tools bypass Session View and create content directly in
# Arrangement View for timeline-based music production.
@mcp.tool()
def build_arrangement_timeline(ctx: Context,
                               sections_json: str,
                               genre: str = "reggaeton",
                               tempo: int = 95,
                               key: str = "Am",
                               style: str = "standard") -> str:
    """Build a complete song directly in Arrangement View.

    *** PREFERRED TOOL FOR TIMELINE-BASED COMPOSITION ***

    This is the ARRANGEMENT-FIRST alternative to produce_with_library():
    instead of creating Session View clips first, content is placed directly
    on the Arrangement timeline at the requested bar positions.

    sections_json format example:
        [
          {"name": "Intro", "start_bar": 0, "duration_bars": 4,
           "tracks": [{"type": "drums", "variation": "minimal"},
                      {"type": "bass", "variation": "sparse"}]},
          {"name": "Verse", "start_bar": 4, "duration_bars": 16,
           "tracks": [{"type": "drums", "variation": "full"},
                      {"type": "bass", "variation": "standard"},
                      {"type": "chords", "variation": "i-v-vi-iv"}]},
          {"name": "Chorus", "start_bar": 20, "duration_bars": 8,
           "tracks": [{"type": "drums", "variation": "full"},
                      {"type": "bass", "variation": "melodic"},
                      {"type": "chords", "variation": "i-v-vi-iv"},
                      {"type": "melody", "variation": "lead"}]}
        ]

    Track types: drums, bass, chords, melody, fx, perc
    Variations:
        - drums: minimal, standard, full, fill
        - bass: sparse, standard, melodic, staccato
        - chords: i-v-vi-iv, i-iv-v, i-vi-iv-v
        - melody: sparse, medium, dense, lead

    Args:
        sections_json: JSON string defining song sections with bar positions
        genre: Genre for sample selection (default "reggaeton")
        tempo: BPM (default 95)
        key: Musical key, e.g. "Am", "Cm", "Gm" (default "Am")
        style: Pattern style — "standard", "minimal", "trap" (default "standard")

    Returns:
        JSON with the arrangement summary (section positions, tracks created).
    """
    try:
        # Use the module-level `json` import; no need for a local re-import.
        sections = json.loads(sections_json)
        if not isinstance(sections, list) or len(sections) == 0:
            return _err("sections_json must be a non-empty list of section objects")

        # Collect the distinct track types referenced anywhere in the plan.
        track_types = set()
        for section in sections:
            for track in section.get("tracks", []):
                track_types.add(track.get("type", "drums"))

        # Create one Arrangement View track per type and map type -> index.
        type_to_index = {}
        created_tracks = []
        for track_type in track_types:
            track_result = _send_to_ableton(
                "create_arrangement_track",
                {"track_type": track_type, "name": f"{track_type.title()} Arr"},
                timeout=15.0
            )
            if track_result.get("status") != "success":
                continue
            idx = track_result.get("result", {}).get("track_index", -1)
            # A "success" response without a usable index previously stored -1,
            # which later slipped past the None check and was sent to Ableton
            # as a track index. Skip such tracks instead.
            if idx < 0:
                continue
            type_to_index[track_type] = idx
            created_tracks.append({"type": track_type, "index": idx})

        # Place each section's content at its bar position.
        created_sections = []
        for section in sections:
            section_name = section.get("name", "Section")
            start_bar = section.get("start_bar", 0)
            duration = section.get("duration_bars", 8)
            section_tracks = []
            for track_def in section.get("tracks", []):
                track_type = track_def.get("type", "drums")
                variation = track_def.get("variation", "standard")
                track_index = type_to_index.get(track_type)
                if track_index is None:
                    # Track creation failed for this type; nothing to place on.
                    continue
                resp = _send_to_ableton(
                    "create_section_at_bar",
                    {
                        "track_index": track_index,
                        "section_type": section_name.lower(),
                        "at_bar": start_bar,
                        "duration_bars": duration,
                        "key": key,
                        "variation": variation
                    },
                    timeout=30.0
                )
                if resp.get("status") == "success":
                    section_tracks.append({
                        "type": track_type,
                        "variation": variation,
                        "track_index": track_index
                    })
            created_sections.append({
                "name": section_name,
                "start_bar": start_bar,
                "duration_bars": duration,
                "tracks": section_tracks
            })

        return _ok({
            "arrangement_type": "timeline_direct",
            "genre": genre,
            "tempo": tempo,
            "key": key,
            "style": style,
            "tracks_created": len(created_tracks),
            "sections_created": len(created_sections),
            "section_details": created_sections,
            "view": "Arrangement",
            "note": "Content created directly in Arrangement View (not Session View)"
        })
    except json.JSONDecodeError as e:
        return _err(f"Invalid JSON in sections_json: {str(e)}")
    except Exception as e:
        logger.exception("build_arrangement_timeline: failed")
        return _err(f"Error building arrangement timeline: {str(e)}")
@mcp.tool()
def create_section_at_bar(ctx: Context,
                          track_index: int,
                          section_type: str,
                          at_bar: float,
                          duration_bars: float = 8,
                          key: str = "Am") -> str:
    """Create a song section (intro/verse/chorus/bridge/outro) at a bar position.

    Places content directly in Arrangement View at the given bar — a building
    block for timeline-based composition.

    Section characteristics:
        - intro: sparse arrangement, minimal drums, building elements
        - verse: full drums, bass, chords; moderate energy
        - chorus: full arrangement with melody, highest energy
        - bridge: different progression, transitional energy
        - outro: fading elements, breakdown
        - build: rising energy, preparing for drop
        - drop: maximum impact, all elements

    Args:
        track_index: Index of the target track
        section_type: intro, verse, chorus, bridge, outro, build, or drop
        at_bar: Starting bar position in the arrangement
        duration_bars: Section length in bars (default 8)
        key: Musical key for harmonic content (default "Am")

    Returns:
        JSON with the section creation status and clip details.
    """
    # Density/variation presets per section type; unknown types fall back to verse.
    section_configs = {
        "intro": {"density": "sparse", "variation": "minimal"},
        "verse": {"density": "medium", "variation": "standard"},
        "chorus": {"density": "full", "variation": "full"},
        "bridge": {"density": "medium", "variation": "melodic"},
        "outro": {"density": "sparse", "variation": "fade"},
        "build": {"density": "building", "variation": "rising"},
        "drop": {"density": "maximum", "variation": "impact"},
    }
    normalized = section_type.lower()
    config = section_configs.get(normalized, section_configs["verse"])
    try:
        payload = {
            "track_index": track_index,
            "section_type": normalized,
            "at_bar": at_bar,
            "duration_bars": duration_bars,
            "key": key,
            "density": config["density"],
            "variation": config["variation"],
        }
        resp = _send_to_ableton("create_section_at_bar", payload, timeout=120.0)
        if resp.get("status") != "success":
            return _err(resp.get("message", f"Failed to create {section_type} at bar {at_bar}"))
        return _ok({
            "track_index": track_index,
            "section_type": section_type,
            "at_bar": at_bar,
            "duration_bars": duration_bars,
            "key": key,
            "config": config,
            "view": "Arrangement",
            "message": f"Created {section_type} at bar {at_bar} on track {track_index}"
        })
    except Exception as e:
        logger.exception("create_section_at_bar: failed")
        return _err(f"Error creating section: {str(e)}")
@mcp.tool()
def create_arrangement_track(ctx: Context,
                             track_type: str,
                             name: Optional[str] = None,
                             insert_at_bar: float = 0) -> str:
    """Create a new track directly in Arrangement View.

    Creates a track specifically for timeline-based arrangement composition.
    The track is ready for clips to be placed at specific bar positions.

    Track types and their purposes:
    - drums: Drum patterns, percussive elements
    - bass: Basslines, low-frequency content
    - chords: Harmonic content, pads, rhythmic chords
    - melody: Lead lines, melodic elements
    - fx: Effects, risers, impacts, transitions
    - perc: Additional percussion layers

    Args:
        track_type: Type of track — drums, bass, chords, melody, fx, perc
        name: Optional custom name for the track (default: auto-generated from type)
        insert_at_bar: Position hint for initial track focus (default 0)

    Returns:
        JSON with track creation status and track index.
    """
    try:
        # Auto-generate name if not provided, e.g. "Drums Arr".
        if name is None:
            name = f"{track_type.title()} Arr"
        # Delegate actual track creation to the Remote Script.
        resp = _send_to_ableton(
            "create_arrangement_track",
            {
                "track_type": track_type,
                "name": name,
                "insert_at_bar": insert_at_bar
            },
            timeout=15.0
        )
        if resp.get("status") == "success":
            result = resp.get("result", {})
            # -1 signals that the Remote Script did not report an index.
            return _ok({
                "track_index": result.get("track_index", -1),
                "track_type": track_type,
                "name": name,
                "view": "Arrangement",
                "message": f"Created {track_type} track '{name}' at index {result.get('track_index', -1)}"
            })
        return _err(resp.get("message", f"Failed to create {track_type} track"))
    except Exception as e:
        return _err(f"Error creating arrangement track: {str(e)}")
    # NOTE: non-success responses fall through to _err with the remote message.
@mcp.tool()
def get_arrangement_status(ctx: Context) -> str:
    """Inspect the current Arrangement View content.

    Reports every clip currently on the Arrangement timeline — positions,
    lengths, and track assignments — for verifying the timeline composition.

    Returns:
        JSON with:
        - total_clips: Number of clips in the arrangement
        - arrangement_length_beats: Total length in beats
        - unique_start_positions: Sorted clip start points (bar map)
        - clips: Clip details (track, name, position, length)
        - tracks: Per-track clip counts
    """
    try:
        resp = _send_to_ableton("get_arrangement_clips", {}, timeout=10.0)
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to get arrangement status"))
        result = resp.get("result", {})
        clip_count = result.get("total_clips", 0)
        return _ok({
            "view": "Arrangement",
            "total_clips": clip_count,
            "arrangement_length_beats": result.get("arrangement_length_beats", 0),
            "unique_start_positions": result.get("unique_start_positions", []),
            "clips": result.get("clips", []),
            "tracks_summary": result.get("tracks_summary", {}),
            "status": "ready" if clip_count > 0 else "empty"
        })
    except Exception as e:
        logger.exception("get_arrangement_status: failed")
        return _err(f"Error getting arrangement status: {str(e)}")
# ------------------------------------------------------------------
# SESSION VS ARRANGEMENT MIGRATION NOTES
# ------------------------------------------------------------------
# OLD SESSION-VIEW-FIRST TOOLS (Deprecated patterns):
# - produce_with_library() → Creates Session clips, optionally records
# - produce_reggaeton() → Session View based
# - generate_*_clip() → Creates clips in Session View slots
#
# NEW ARRANGEMENT-FIRST TOOLS (Preferred):
# - build_arrangement_timeline() → Direct timeline composition
# - create_section_at_bar() → Place sections at specific bars
# - create_arrangement_track() → Create timeline-ready tracks
# - get_arrangement_status() → Inspect timeline state
# - generate_intelligent_track() → One-prompt professional track creation
#
# RECOMMENDED WORKFLOW:
# 1. Use build_arrangement_timeline() for complete songs
# 2. Use create_section_at_bar() for individual sections
# 3. Use create_arrangement_track() for custom track layouts
# 4. Use get_arrangement_status() to verify timeline content
# 5. Use generate_intelligent_track() for one-prompt music creation
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# INTELLIGENT TRACK GENERATION
# ------------------------------------------------------------------
@mcp.tool()
def generate_intelligent_track(ctx: Context,
                               description: str,
                               structure_type: str = "standard",
                               variation_level: str = "medium",
                               coherence_threshold: float = 0.90,
                               include_vocal_placeholder: bool = True,
                               surprise_mode: bool = False,
                               save_as_preset: bool = True) -> str:
    """Generate a complete professional track with intelligent sample selection.

    ONE-PROMPT MUSIC CREATION: builds a complete, professional-quality track
    from a single description — sample selection, coherence validation,
    arrangement creation, and mixing are all handled automatically.

    Args:
        description: Natural-language description of the desired track, e.g.
                     "reggaeton perreo intenso 95bpm Am",
                     "romantico suave 90bpm Gm con piano",
                     "trap oscuro 140bpm Cm, agresivo".
        structure_type: Song structure template — "tiktok" (30s), "short" (1min),
                        "standard" (3min), "extended" (4-5min).
        variation_level: Sample variation between sections — "low" (same samples
                         throughout), "medium" (subtle), "high" (distinct but
                         coherent).
        coherence_threshold: Minimum professional coherence (0.0-1.0). Default
                             0.90; iterates until achieved or fails explicitly.
        include_vocal_placeholder: Add an empty track for vocals.
        surprise_mode: Introduce controlled randomness for unique-but-coherent
                       results on each run.
        save_as_preset: Save the resulting kit as a reusable preset.

    Returns:
        JSON with full track info, coherence scores, rationale, and the preset
        name if one was saved.

    Example:
        generate_intelligent_track(
            description="reggaeton perreo intenso 95bpm Am",
            structure_type="standard",
            variation_level="high",
            coherence_threshold=0.90
        )
    """
    payload = {
        "description": description,
        "structure_type": structure_type,
        "variation_level": variation_level,
        "coherence_threshold": coherence_threshold,
        "include_vocal_placeholder": include_vocal_placeholder,
        "surprise_mode": surprise_mode,
        "save_as_preset": save_as_preset,
    }
    # Full track generation can legitimately take minutes.
    return _proxy_ableton_command(
        "generate_intelligent_track",
        payload,
        timeout=300.0,
        defaults={
            "description": description,
            "structure_type": structure_type,
        }
    )
@mcp.tool()
def generate_expansive_track(
    ctx: Context,
    description: str,
    samples_per_category: int = 12,
    variation_strategy: str = "combined",
    coherence_threshold: float = 0.90,
    structure_type: str = "standard"
) -> str:
    """Generate an expansive track production with extensive library utilization.

    Builds a complete production using a wide selection of samples across
    multiple categories (drums, bass, synths, FX, vocals), pulling more samples
    per category than standard generation for richer, layered results.

    Args:
        description: Natural-language description of the desired track, e.g.
                     "reggaeton perreo intenso 95bpm Am",
                     "romantico suave 90bpm Gm con piano",
                     "trap oscuro 140bpm Cm, agresivo".
        samples_per_category: Samples selected per category (8-16). Higher is
                              richer; default 12 is a good balance.
        variation_strategy: How samples vary across sections —
                            "combined" (mix of all strategies, default),
                            "sequential" (progressive addition),
                            "random" (random per section),
                            "coherent" (similar samples, subtle variation).
        coherence_threshold: Minimum professional coherence (0.80-0.95).
                             Default 0.90; lower allows more experimentation.
        structure_type: Song structure template — "tiktok" (30s), "short"
                        (1min), "standard" (3min), "extended" (4-5min).

    Returns:
        JSON with: status, tracks_created, samples_used (role -> samples),
        coherence_scores (role -> score), total_clips, qa_score, message.
    """
    # --- Parameter validation (error strings kept stable for clients) ---
    if not 8 <= samples_per_category <= 16:
        return _err(f"Invalid samples_per_category: {samples_per_category}. Must be between 8-16.")
    if not 0.80 <= coherence_threshold <= 0.95:
        return _err(f"Invalid coherence_threshold: {coherence_threshold}. Must be between 0.80-0.95.")
    valid_strategies = ["combined", "sequential", "random", "coherent"]
    if variation_strategy not in valid_strategies:
        return _err(f"Invalid variation_strategy: {variation_strategy}. Must be one of: {', '.join(valid_strategies)}")

    try:
        from engines.integration import get_integration_coordinator
    except ImportError:
        return _err("Integration coordinator not available. Ensure engines.integration module is installed.")

    coordinator = get_integration_coordinator()
    # Explicit capability check. The previous `except AttributeError` around the
    # whole call also swallowed AttributeErrors raised *inside* the coordinator,
    # misreporting genuine bugs as "method not yet implemented".
    if not hasattr(coordinator, "build_expansive_production"):
        return _err("build_expansive_production method not yet implemented in integration coordinator.")

    try:
        result = coordinator.build_expansive_production(
            description=description,
            samples_per_category=samples_per_category,
            variation_strategy=variation_strategy,
            coherence_threshold=coherence_threshold,
            structure_type=structure_type
        )
        # Normalize the coordinator result into the documented response shape.
        return _ok({
            "status": "success" if result.get("success", False) else "error",
            "tracks_created": result.get("tracks_created", []),
            "samples_used": result.get("samples_used", {}),
            "coherence_scores": result.get("coherence_scores", {}),
            "total_clips": result.get("total_clips", 0),
            "qa_score": result.get("qa_score", 0.0),
            "message": result.get("message", "Expansive track generation completed")
        })
    except Exception as e:
        return _err(f"Error generating expansive track: {str(e)}")
# ------------------------------------------------------------------
# ARRANGEMENT INJECTION TOOLS
# ------------------------------------------------------------------
@mcp.tool()
def create_arrangement_audio_pattern(ctx: Context, track_index: int, file_path: str,
                                     positions: str, name: str = "") -> str:
    """Create audio clips in Arrangement View directly from file.

    Args:
        track_index: Target track index
        file_path: Absolute path to audio file
        positions: JSON list of beat positions (e.g., "[0.0, 16.0, 32.0]")
        name: Optional clip name

    Returns:
        JSON with created clip info (one clip per position on success),
        or an error payload if parsing or the Ableton call fails.
    """
    try:
        # Uses the module-level `json` import; the previous redundant
        # function-local `import json` was removed.
        pos_list = json.loads(positions)
        if not isinstance(pos_list, list):
            return _err("positions must be a JSON list of beat positions")
        resp = _send_to_ableton(
            "create_arrangement_audio_pattern",
            {"track_index": track_index, "file_path": file_path,
             "positions": pos_list, "name": name},
            timeout=TIMEOUTS["create_arrangement_audio_pattern"]
        )
        if resp.get("status") == "success":
            return _ok({
                "track_index": track_index,
                "file_path": file_path,
                "positions": pos_list,
                "clips_created": len(pos_list),
                "name": name,
                "view": "Arrangement",
            })
        return _err(resp.get("message", "Failed to create arrangement audio pattern"))
    except json.JSONDecodeError:
        return _err("Invalid JSON in positions parameter. Expected format: '[0.0, 16.0, 32.0]'")
    except Exception as e:
        return _err(f"Error creating arrangement audio pattern: {str(e)}")
# ------------------------------------------------------------------
# AUDIO ANALYSIS TOOLS
# ------------------------------------------------------------------
@mcp.tool()
def analyze_audio_file(ctx: Context, file_path: str) -> str:
    """Analyze an audio file and extract features (BPM, key, spectral).

    Args:
        file_path: Absolute path to the audio file on disk.

    Returns:
        JSON with the extracted AudioFeatures (bpm, key, duration,
        spectral descriptors, classification flags, genre suggestions).
    """
    try:
        if not os.path.isfile(file_path):
            return _err(f"Audio file not found: {file_path}")
        from engines.audio_analyzer_dual import AudioAnalyzerDual
        feats = AudioAnalyzerDual(backend="auto").analyze_sample(file_path)
        # Flatten the AudioFeatures dataclass into a JSON-friendly dict,
        # preserving the established output key order.
        field_names = (
            "bpm", "key", "duration", "spectral_centroid", "spectral_rolloff",
            "zero_crossing_rate", "rms_energy", "key_confidence", "sample_type",
            "is_harmonic", "is_percussive", "suggested_genres",
        )
        payload = {"file_path": file_path}
        for attr in field_names:
            payload[attr] = getattr(feats, attr)
        return _ok(payload)
    except ImportError:
        return _err("Audio analyzer engine not available.")
    except Exception as e:
        return _err(f"Error analyzing audio file: {str(e)}")
# ------------------------------------------------------------------
# DIVERSITY & COHERENCE TOOLS
# ------------------------------------------------------------------
@mcp.tool()
def reset_diversity_memory(ctx: Context) -> str:
    """Reset cross-generation diversity memory for a fresh session.

    Returns:
        JSON confirmation that all generation history was cleared.
    """
    try:
        from engines.coherence_system import reset_all_memory
        reset_all_memory()
        confirmation = {
            "status": "success",
            "message": "Diversity memory reset successfully. All generation history cleared.",
        }
        return _ok(confirmation)
    except ImportError:
        return _err("Coherence system not available.")
    except Exception as e:
        return _err(f"Error resetting diversity memory: {str(e)}")
@mcp.tool()
def get_sample_fatigue_report(ctx: Context) -> str:
    """Get the sample usage fatigue report.

    Returns:
        JSON report of the most-used samples by role.
    """
    try:
        from engines.coherence_system import get_coherence_memory_stats
        payload = {
            "status": "success",
            "report": get_coherence_memory_stats(),
        }
        return _ok(payload)
    except ImportError:
        return _err("Coherence system not available.")
    except Exception as e:
        return _err(f"Error getting sample fatigue report: {str(e)}")
# ------------------------------------------------------------------
# PROFESSIONAL MIXING TOOLS
# ------------------------------------------------------------------
@mcp.tool()
def apply_professional_mix(ctx: Context, track_assignments: str) -> str:
    """Apply complete professional mix with buses and returns.

    Args:
        track_assignments: JSON dict mapping track indices to roles
            (e.g., '{"0": "kick", "1": "snare", "2": "bass"}')

    Returns:
        JSON with the applied mix configuration, or an error payload
        if parsing, engine import, or the Ableton connection fails.

    Notes:
        - Uses the module-level `json` import; the previous redundant
          function-local `import json` was removed.
        - The engine function is imported under an alias so it no longer
          shadows this tool's own name inside the function body.
    """
    try:
        assignments = json.loads(track_assignments)
        if not isinstance(assignments, dict):
            return _err("track_assignments must be a JSON object mapping track indices to roles")
        # JSON object keys are always strings; convert them to int track indices.
        parsed_assignments = {}
        for k, v in assignments.items():
            try:
                parsed_assignments[int(k)] = v
            except ValueError:
                return _err(f"Invalid track index: {k}. Must be an integer.")
        from engines.bus_architecture import apply_professional_mix as _engine_apply_mix
        from engines.tcp_client import get_ableton_connection
        ableton_conn = get_ableton_connection()
        if ableton_conn is None:
            return _err("Unable to connect to Ableton Live")
        result = _engine_apply_mix(ableton_conn, parsed_assignments)
        return _ok({
            "status": "success",
            "message": "Professional mix applied successfully",
            "configuration": result,
            "tracks_processed": len(parsed_assignments),
        })
    except json.JSONDecodeError:
        return _err('Invalid JSON in track_assignments. Expected format: \'{"0": "kick", "1": "snare"}\'')
    except ImportError as e:
        return _err(f"Required engine not available: {str(e)}")
    except Exception as e:
        return _err(f"Error applying professional mix: {str(e)}")
# ------------------------------------------------------------------
# AGENTE 18: PROFESSIONAL WORKFLOW ORCHESTRATOR
# ------------------------------------------------------------------
@mcp.tool()
def produce_professional_track(ctx: Context, config_json: str) -> str:
    """Master orchestrator for the professional workflow (Agente 18).

    Runs a complete 5-step pipeline:
      1. Create tracks and structure
      2. Generate content per section
      3. Apply FX and transitions
      4. Apply professional mixing
      5. QA validation

    Args:
        config_json: JSON string with the full configuration (genre, style,
            bpm, key, duration, structure sections, elements, mixing flags).

    Returns:
        JSON with the complete workflow result: tracks created, sections
        generated, QA score and duration.
    """
    try:
        from engines.professional_workflow import ProfessionalWorkflow
        result = ProfessionalWorkflow().produce_professional_track(config_json)
        # Project the engine result onto the stable tool output schema,
        # filling in defaults for any keys the engine omitted.
        schema = (
            ("success", False),
            ("step", None),
            ("config", None),
            ("tracks_created", []),
            ("tracks_count", 0),
            ("sections_created", 0),
            ("buses_created", 0),
            ("fx_applied", 0),
            ("qa_score", 0.0),
            ("errors", []),
            ("warnings", []),
            ("duration_seconds", 0.0),
        )
        return _ok({key: result.get(key, fallback) for key, fallback in schema})
    except ImportError:
        return _err("Professional workflow engine not available.")
    except Exception as e:
        return _err(f"Error in professional workflow: {str(e)}")
# ==================================================================
# AGENTE 14: PROFESSIONAL MELODY ENGINE (MOTIVIC)
# ==================================================================
@mcp.tool()
def generate_motivic_melody(ctx: Context,
                            track_index: int,
                            scale: str = "minor",
                            bars: int = 8,
                            density: str = "medium",
                            variation_types: Optional[list] = None,
                            phrase_structure: Optional[str] = None,
                            contour: Optional[str] = None,
                            root_pitch: int = 60,
                            seed: Optional[int] = None) -> str:
    """Generate professional motivic melody with variations and phrase structures (Agente 14).

    Creates sophisticated melodies using classical composition techniques:
    - Theme/motive generation with scale-based melodic contours
    - Variations: sequence (repeat at a different interval), inversion
      (invert intervals), retrograde (reverse), expansion/contraction
    - Phrase structures: antecedent-consequent (question-answer), period, sentence
    - Melodic contour application: arch (rise then fall), wave (multiple peaks),
      step_wise (conjunct motion)

    Args:
        track_index: Target track index for the melody
        scale: Scale type - "minor", "major", "harmonic_minor", "melodic_minor",
            "pentatonic_minor", "pentatonic_major", "dorian", "phrygian" (default "minor")
        bars: Number of bars for the melody (default 8)
        density: Note density - "sparse", "medium", "dense" (default "medium")
        variation_types: List of variation types to apply - "sequence", "inversion",
            "retrograde", "expansion", "contraction" (default None)
        phrase_structure: Phrase structure type - "antecedent_consequent", "period",
            "sentence" (default None)
        contour: Melodic contour - "arch", "wave", "step_wise", "ascending",
            "descending", "flat" (default None)
        root_pitch: Root note pitch (MIDI note number) for melody center (default 60 = C4)
        seed: Random seed for reproducible melodies (default None)

    Returns:
        JSON with melody generation results and metadata.

    NOTE(review): `density` and `root_pitch` are accepted but not forwarded
    to the engine call below; `density` is only echoed back in the response
    and `root_pitch` is unused — confirm whether the engine should receive them.
    """
    try:
        from engines.melody_engine import generate_motivic_melody as engine_generate
        # Generate melody using the engine
        result = engine_generate(
            scale=scale,
            bars=bars,
            variation_types=variation_types or [],
            phrase_structure=phrase_structure,
            contour=contour,
            seed=seed
        )
        # Apply contour if specified (engine may have done it, but re-apply to be sure)
        if contour and result.get("combined_notes"):
            from engines.melody_engine import MelodyEngine, Note
            engine = MelodyEngine()
            # Convert dict notes back to Note objects for contour application
            notes = [
                Note(
                    pitch=n["pitch"],
                    duration=n["duration"],
                    velocity=n["velocity"],
                    start_time=n["start_time"]
                )
                for n in result["combined_notes"]
            ]
            # Apply contour, then convert back to Ableton's note-dict format
            contoured_notes = engine.apply_melodic_contour(notes, contour)
            result["combined_notes"] = engine.notes_to_ableton_format(contoured_notes)
            result["metadata"]["contour"] = contour
        # Create clip and add notes to track (always clip slot 0; 4 beats per bar)
        clip_resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": 0, "length": float(bars * 4)},
            timeout=TIMEOUTS["generate_motivic_melody"]
        )
        if clip_resp.get("status") != "success":
            return _err(f"Failed to create clip: {clip_resp.get('message', 'Unknown error')}")
        # Add notes to clip
        notes_resp = _send_to_ableton(
            "add_notes_to_clip",
            {
                "track_index": track_index,
                "clip_index": 0,
                "notes": result["combined_notes"]
            },
            timeout=TIMEOUTS["generate_motivic_melody"]
        )
        if notes_resp.get("status") != "success":
            return _err(f"Failed to add notes: {notes_resp.get('message', 'Unknown error')}")
        return _ok({
            "track_index": track_index,
            "scale": scale,
            "bars": bars,
            "density": density,
            "theme_notes_count": len(result.get("theme", [])),
            "variations_count": len(result.get("variations", [])),
            "total_notes_added": len(result.get("combined_notes", [])),
            "phrase_structure": phrase_structure,
            "contour": contour,
            "metadata": result.get("metadata", {}),
            "variations": [{"type": v["type"], "note_count": len(v["notes"])}
                           for v in result.get("variations", [])],
        })
    except ImportError:
        return _err("Melody engine not available. Ensure melody_engine.py is present.")
    except ValueError as e:
        return _err(f"Invalid parameter: {str(e)}")
    except Exception as e:
        return _err(f"Error generating motivic melody: {str(e)}")
# ==================================================================
# AGENTE 12: VST/AU PLUGIN SUPPORT
# ==================================================================
@mcp.tool()
def scan_vst_plugins(ctx: Context, force_rescan: bool = False) -> str:
    """Scan the system for installed VST/AU plugins.

    Detects popular plugins such as Serum, Massive, Sylenth1, FabFilter,
    and ValhallaDSP products.

    Args:
        force_rescan: Force a fresh scan even if a cache exists.

    Returns:
        JSON with scan results, including the installed plugin list and paths.
    """
    try:
        from engines.vst_manager import get_vst_manager
        scan_result = get_vst_manager().scan_vst_plugins(force_rescan=force_rescan)
        return _ok(scan_result)
    except Exception as e:
        return _err(f"Error scanning VST plugins: {str(e)}")
@mcp.tool()
def load_vst_plugin(ctx: Context, track_index: int, plugin_name: str, preset_name: str = "") -> str:
    """Load a VST/AU plugin on a track.

    Supported plugin families include:
    - Synths: Serum, Massive, Sylenth1
    - Effects: FabFilter Pro-Q, Pro-C, Pro-R
    - Reverb/Delay: ValhallaRoom, ValhallaVintageVerb, ValhallaDelay

    Args:
        track_index: Index of the target track
        plugin_name: Name of the plugin (e.g., "Serum", "Massive", "Pro-Q")
        preset_name: Optional preset to load after inserting the plugin

    Returns:
        JSON with plugin load status and information.
    """
    try:
        from engines.vst_manager import validate_plugin
        # Confirm the plugin exists on this machine before asking Ableton.
        installed, detail = validate_plugin(plugin_name)
        if not installed:
            return _err(f"Plugin '{plugin_name}' not found: {detail}")
        request = {
            "track_index": track_index,
            "plugin_name": plugin_name,
            "preset_name": preset_name,
        }
        resp = _send_to_ableton("load_vst_plugin", request,
                                timeout=TIMEOUTS["load_vst_plugin"])
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to load plugin"))
        return _ok({
            "plugin_loaded": True,
            "plugin_name": plugin_name,
            "track_index": track_index,
            "preset_name": preset_name or None,
            "validation": detail,
        })
    except Exception as e:
        return _err(f"Error loading VST plugin: {str(e)}")
@mcp.tool()
def configure_vst_parameter(ctx: Context, track_index: int, plugin_index: int,
                            param_name: str, value: float) -> str:
    """Configure a parameter on a VST/AU plugin.

    Common parameters by plugin:
    - Serum: osc_a_wave, osc_a_level, filter_cutoff, filter_resonance, attack, decay, sustain, release
    - Massive: osc1_pitch, osc1_wtpos, filter_cutoff, filter_resonance, attack, decay, sustain, release
    - Sylenth1: osc_a1_wave, osc_a1_pitch, cutoff_a, resonance_a, attack, decay, sustain, release
    - Pro-Q: gain, mix, band1_gain, band1_freq, band1_q, band2_gain, band2_freq
    - Pro-C: threshold, ratio, attack, release, makeup
    - ValhallaRoom: mix, decay, size, predelay

    Args:
        track_index: Index of the track containing the plugin
        plugin_index: Index of the plugin in the device chain (0-based)
        param_name: Name of the parameter to configure
        value: New value for the parameter (normalized 0.0-1.0 or actual value)

    Returns:
        JSON with parameter configuration status.
    """
    try:
        request = {
            "track_index": track_index,
            "plugin_index": plugin_index,
            "param_name": param_name,
            "value": value,
        }
        resp = _send_to_ableton("configure_vst_parameter", request,
                                timeout=TIMEOUTS["configure_vst_parameter"])
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to configure parameter"))
        # Echo the request back with a success flag prepended.
        ack = {"parameter_configured": True}
        ack.update(request)
        return _ok(ack)
    except Exception as e:
        return _err(f"Error configuring VST parameter: {str(e)}")
@mcp.tool()
def get_vst_presets(ctx: Context, plugin_name: str) -> str:
    """Get available presets for a VST/AU plugin.

    Args:
        plugin_name: Name of the plugin (e.g., "Serum", "Pro-Q")

    Returns:
        JSON with the preset list and plugin information.
    """
    try:
        from engines.vst_manager import get_vst_presets as _fetch_presets
        preset_info = _fetch_presets(plugin_name)
        if preset_info.get("status") == "error":
            return _err(preset_info.get("message", "Unknown error"))
        return _ok(preset_info)
    except Exception as e:
        return _err(f"Error getting VST presets: {str(e)}")
# ------------------------------------------------------------------
# ==================================================================
# AGENTE 17: SECTION GENERATOR (Section-Based Composition)
# ==================================================================
@mcp.tool()
def generate_section_by_type(
    ctx: Context,
    section_type: str,
    at_bar: int = 0,
    duration_bars: int = 8,
    key: str = "Am",
    bpm: float = 95.0,
    build_method: str = "gradual",
    riser_type: str = "standard",
    drum_fill_intensity: float = 0.8,
    melodic_focus: bool = True,
    drum_reduction: float = 0.7,
    max_energy: bool = True,
    all_elements: bool = True,
    variation_type: str = "standard",
    recap_type: str = "melody_only",
    ending_style: str = "fade"
) -> str:
    """Generate a complete musical section by type (Agente 17).

    Builds a full configuration for one of the supported section kinds
    (intro, build, breakdown, chorus, outro, verse), then shifts every
    track/FX/automation entry so the section starts at `at_bar`.

    Args:
        section_type: "intro", "build", "breakdown", "chorus", "outro" or "verse"
        at_bar: Starting position in bars (default 0)
        duration_bars: Section length in bars (default 8)
        key: Musical key (default "Am")
        bpm: Tempo in BPM (default 95.0)
        build_method: Intro only - "gradual", "sudden", "ambient", "rhythmic"
        riser_type: Build only - "standard", "noise", "synth", "vocal", "minimal"
        drum_fill_intensity: Build only - intensity 0.0-1.0 (default 0.8)
        melodic_focus: Breakdown only - focus on melody (default True)
        drum_reduction: Breakdown only - reduction amount 0.0-1.0 (default 0.7)
        max_energy: Chorus only - maximum energy (default True)
        all_elements: Chorus only - include all elements (default True)
        variation_type: Chorus only - "standard", "minimal", "double", "bouncy"
        recap_type: Outro only - "full", "melody_only", "drums_only", "chords_only"
        ending_style: Outro only - "fade", "cut", "breakdown", "loop"

    Returns:
        JSON with the complete generated section configuration.
    """
    try:
        from engines.section_generator import SectionGenerator, get_section_generator
        generator = get_section_generator(key=key, bpm=bpm)
        # Lazy dispatch table: only the selected builder is invoked,
        # matching the original single-branch behavior.
        builders = {
            "intro": lambda: generator.generate_intro(
                build_method=build_method,
                duration=duration_bars,
                start_with_drums=False,
                include_fx_riser=True,
            ),
            "build": lambda: generator.generate_build(
                riser_type=riser_type,
                drum_fill_intensity=drum_fill_intensity,
                duration=duration_bars,
                filter_sweep=True,
            ),
            "breakdown": lambda: generator.generate_breakdown(
                melodic_focus=melodic_focus,
                drum_reduction=drum_reduction,
                duration=duration_bars,
                include_buildup=True,
            ),
            "chorus": lambda: generator.generate_chorus(
                max_energy=max_energy,
                all_elements=all_elements,
                duration=duration_bars,
                variation_type=variation_type,
            ),
            "outro": lambda: generator.generate_outro(
                recap_type=recap_type,
                ending_style=ending_style,
                duration=duration_bars,
                include_melody=True,
            ),
            "verse": lambda: generator.generate_verse(
                variation="standard",
                duration=duration_bars,
                include_melody=False,
            ),
        }
        build_section = builders.get(section_type.lower())
        if build_section is None:
            return _err(f"Unknown section type: {section_type}. Valid types: intro, build, breakdown, chorus, outro, verse")
        config = build_section()

        def _shift(items, keys):
            # Copy each entry and offset the listed bar fields by at_bar.
            shifted = []
            for item in items:
                updated = item.copy()
                for field, default in keys:
                    updated[field] = at_bar + item.get(field, default)
                shifted.append(updated)
            return shifted

        adjusted_tracks = _shift(config.tracks, [("start_bar", 0)])
        adjusted_fx = _shift(config.fx, [("start_bar", 0)])
        adjusted_automations = _shift(
            config.automations, [("start_bar", 0), ("end_bar", duration_bars)]
        )
        return _ok({
            "section_type": section_type,
            "start_bar": at_bar,
            "duration_bars": duration_bars,
            "key": key,
            "bpm": bpm,
            "energy_level": config.energy_level,
            "tracks": adjusted_tracks,
            "fx": adjusted_fx,
            "automations": adjusted_automations,
            "status": "generated",
            "note": f"Section '{section_type}' generated at bar {at_bar}. Use create_section_at_bar() to place in Arrangement View."
        })
    except ImportError:
        return _err("SectionGenerator engine not available. Check that section_generator.py is properly installed.")
    except Exception as e:
        return _err(f"Error generating section: {str(e)}")
@mcp.tool()
def create_full_song_sections(
    ctx: Context,
    structure_type: str = "standard",
    key: str = "Am",
    bpm: float = 95.0,
    start_bar: int = 0
) -> str:
    """Create a complete song structure made of sections (Agente 17).

    Generates the full sequence of sections (intro, verse, chorus, etc.)
    for the requested structure type and lays them out consecutively
    starting at `start_bar`.

    Args:
        structure_type: Structure kind - "standard", "extended", "minimal"
        key: Musical key (default "Am")
        bpm: Tempo in BPM (default 95.0)
        start_bar: First bar of the structure (default 0)

    Returns:
        JSON listing the generated sections and their summary configurations.
    """
    try:
        from engines.section_generator import SectionGenerator, get_section_generator
        generator = get_section_generator(key=key, bpm=bpm)
        sections = generator.create_full_song_structure(
            structure_type=structure_type,
            total_duration=64
        )
        # Walk the sections, assigning each one a consecutive start bar.
        summaries = []
        cursor = start_bar
        for sec in sections:
            summaries.append({
                "section_type": sec.section_type,
                "start_bar": cursor,
                "duration_bars": sec.duration_bars,
                "energy_level": sec.energy_level,
                "key": sec.key,
                "tracks_count": len(sec.tracks),
                "fx_count": len(sec.fx),
                "automations_count": len(sec.automations)
            })
            cursor += sec.duration_bars
        total_bars = cursor - start_bar
        return _ok({
            "structure_type": structure_type,
            "key": key,
            "bpm": bpm,
            "total_sections": len(summaries),
            "total_bars": total_bars,
            "start_bar": start_bar,
            "sections": summaries,
            "status": "generated",
            "note": f"Generated {len(summaries)} sections totaling {total_bars} bars. Use these configs with create_section_at_bar()."
        })
    except ImportError:
        return _err("SectionGenerator engine not available.")
    except Exception as e:
        return _err(f"Error creating song sections: {str(e)}")
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# AGENTE 16: PAD AND TEXTURE LAYER SYSTEM
# ------------------------------------------------------------------
@mcp.tool()
def generate_texture_layers(ctx: Context,
                            track_index: int,
                            key: str = "Am",
                            bars: int = 16,
                            style: str = "ambient",
                            progression: str = "i_v_vi_iv",
                            density: float = 0.5,
                            apply_automation: bool = True) -> str:
    """Generate pad and texture layers for harmonic enrichment.

    Builds one or more pad layers and sends the combined notes to Ableton
    in a single "generate_texture_layers" command:
    - "ambient": one long evolving pad (optionally with volume-swell automation)
    - "rhythmic": ambient pad + syncopated chord pattern, layered by frequency range
    - "arpeggiated": two arpeggio layers, shifted one octave down/up respectively
    - "full": complete stack built by the engine

    Args:
        track_index: Target track index for the pads
        key: Musical key (default Am)
        bars: Duration in bars (default 16); converted to beats as bars * 4
        style: Pad style - "ambient", "rhythmic", "arpeggiated", "full"
        progression: Chord progression name ("i_v_vi_iv", "i_vi_iv_v", etc.)
        density: Note density 0.0-1.0 (only used by the "rhythmic" style)
        apply_automation: Add volume-swell automation (only used by "ambient")

    Returns:
        JSON with layer details, note counts and the Ableton clip result.
    """
    try:
        from engines import get_texture_engine
        engine = get_texture_engine()
        # Validate the requested progression against the engine's catalog.
        progressions = engine.get_available_progressions()
        if progression not in progressions:
            return _err(f"Unknown progression: {progression}. Available: {list(progressions.keys())}")
        chord_prog = progressions[progression]
        duration = bars * 4  # Convert bars to beats (assumes 4/4 time)
        # Generate texture based on style
        if style == "ambient":
            layer = engine.generate_ambient_pad(
                chord_progression=chord_prog,
                duration=duration,
                key=key,
                quality="add9",
                voicing="spread"
            )
            if apply_automation:
                # Swell points are (bar, level) pairs: quiet start, peak mid-way.
                layer = engine.apply_pad_automation(
                    layer,
                    volume_swells={"swells": [(0, 0.2), (bars//2, 0.7), (bars, 0.5)]}
                )
            layers = [layer]
        elif style == "rhythmic":
            ambient = engine.generate_ambient_pad(
                chord_progression=chord_prog,
                duration=duration,
                key=key,
                quality="sus2",
                voicing="open"
            )
            rhythmic = engine.generate_rhythmic_pad(
                chord_progression=chord_prog,
                syncopation_pattern="latin",
                duration=duration,
                key=key,
                density=density
            )
            # NOTE(review): first argument is passed as None here — confirm
            # layer_by_frequency_range's expected signature.
            layers = engine.layer_by_frequency_range(None, ambient, rhythmic)
        elif style == "arpeggiated":
            low_arp = engine.generate_arpeggiated_pad(
                chord_progression=chord_prog,
                arp_pattern="up",
                duration=duration,
                key=key,
                rate="8th",
                octave_range=1
            )
            high_arp = engine.generate_arpeggiated_pad(
                chord_progression=chord_prog,
                arp_pattern="up_down",
                duration=duration,
                key=key,
                rate="16th",
                octave_range=2
            )
            # Adjust octaves (mutates the layers' note dicts in place):
            # low arp down one octave, high arp up one octave.
            for note in low_arp.notes:
                note["pitch"] -= 12
            for note in high_arp.notes:
                note["pitch"] += 12
            layers = [low_arp, high_arp]
        elif style == "full":
            config = engine.create_full_texture_stack(
                key=key,
                duration=duration,
                style="full",
                progression_name=progression
            )
            layers = config.layers
        else:
            return _err(f"Unknown style: {style}. Use: ambient, rhythmic, arpeggiated, full")
        # Merge all layers into one note list for a single MIDI clip.
        all_notes = []
        for i, layer in enumerate(layers):
            # Offset notes for each layer slightly
            for note in layer.notes:
                note_with_offset = note.copy()
                note_with_offset["start_time"] += i * 0.02  # Micro-offset for phase
                all_notes.append(note_with_offset)
        # Send to Ableton
        resp = _send_to_ableton(
            "generate_texture_layers",
            {
                "track_index": track_index,
                "notes": all_notes,
                "duration": duration,
                "style": style,
                "layers": len(layers)
            },
            timeout=30.0
        )
        if resp.get("status") == "success":
            result = resp.get("result", {})
            return _ok({
                "track_index": track_index,
                "style": style,
                "key": key,
                "progression": progression,
                "bars": bars,
                "layers_created": len(layers),
                "total_notes": len(all_notes),
                "layer_details": [layer.to_dict() for layer in layers],
                "clip_created": result.get("clip_created", False),
                "notes_added": result.get("notes_added", 0),
            })
        return _err(resp.get("message", "Failed to generate texture layers"))
    except ImportError as e:
        return _err(f"Texture engine not available: {str(e)}")
    except Exception as e:
        return _err(f"Error generating texture layers: {str(e)}")
# ==================================================================
# AGENTE 15: REGGAETON RHYTHM PATTERNS LIBRARY
# ==================================================================
@mcp.tool()
def get_rhythmic_pattern(ctx: Context,
                         pattern_type: str = "dembow_classic",
                         bars: int = 4,
                         intensity: str = "standard",
                         heaviness: str = "medium",
                         fill_density: str = "medium",
                         hat_speed: str = "32nd",
                         complexity: str = "medium",
                         ghost_density: str = "medium",
                         style: str = "dembow") -> str:
    """Get a rhythmic pattern from the Reggaeton Patterns Library (Agente 15).

    Available pattern types:
    - "dembow_classic": classic dembow (kicks on 1,3 + snares on 2.25,4)
    - "moombahton": slower, heavier pattern with house-style snares
    - "perreo_acelerado": fast, intense pattern with double-time feel
    - "trapeton": trap-reggaeton fusion with 32nd-note hi-hats
    - "syncopated_kick": off-beat kicks for advanced groove
    - "ghost_snare": ghost notes on snare for a human feel
    - "open_hat": strategic open hi-hat placements

    Args:
        pattern_type: Which pattern to generate (see list above)
        bars: Number of bars for the pattern (default 4)
        intensity: dembow_classic - "minimal", "standard", "intense"
        heaviness: moombahton - "light", "medium", "heavy"
        fill_density: perreo_acelerado - "low", "medium", "high"
        hat_speed: trapeton - "16th", "32nd", "64th_triplet"
        complexity: syncopated_kick - "simple", "medium", "complex"
        ghost_density: ghost_snare - "low", "medium", "high"
        style: open_hat - "dembow", "moombahton", "trap", "minimal"

    Returns:
        JSON with pattern_type, bars, event_count, the events list (each
        event: position in beats, velocity 0-127, sample_type string) and
        the list of available patterns.
    """
    try:
        from engines.reggaeton_patterns import ReggaetonPatterns
        # Dispatch table: pattern name -> (ReggaetonPatterns method, kwargs).
        dispatch = {
            "dembow_classic": ("get_dembow_classic", {"intensity": intensity}),
            "moombahton": ("get_moombahton_pattern", {"heaviness": heaviness}),
            "perreo_acelerado": ("get_perreo_acelerado", {"fill_density": fill_density}),
            "trapeton": ("get_trapeton_pattern", {"hat_speed": hat_speed}),
            "syncopated_kick": ("get_syncopated_kick", {"complexity": complexity}),
            "ghost_snare": ("get_ghost_snare_pattern", {"ghost_density": ghost_density}),
            "open_hat": ("get_open_hat_placement", {"style": style}),
        }
        entry = dispatch.get(pattern_type)
        if entry is None:
            return _err(f"Invalid pattern_type: {pattern_type}. "
                        f"Available: {list(dispatch.keys())}")
        method_name, method_kwargs = entry
        pattern_fn = getattr(ReggaetonPatterns, method_name)
        # Generate the events and serialize them in one pass.
        events_dict = [event.to_dict() for event in pattern_fn(bars=bars, **method_kwargs)]
        return _ok({
            "pattern_type": pattern_type,
            "bars": bars,
            "event_count": len(events_dict),
            "events": events_dict,
            "available_patterns": list(dispatch.keys()),
            "method_used": method_name,
            "parameters_used": method_kwargs
        })
    except ImportError:
        return _err("Reggaeton patterns engine not available. "
                    "Ensure reggaeton_patterns.py is present in engines/")
    except ValueError as e:
        return _err(f"Invalid parameter: {str(e)}")
    except Exception as e:
        return _err(f"Error generating rhythmic pattern: {str(e)}")
# ------------------------------------------------------------------
# SPRINT 5: DJ PROFESSIONAL TRACK GENERATION
# ------------------------------------------------------------------
@mcp.tool()
def generate_dj_professional_track(
    ctx: Context,
    description: str,
    tempo: int = 95,
    key: str = "Am",
    include_dj_extended: bool = True,
    include_radio_edit: bool = True,
    sample_count_target: int = 330
) -> str:
    """Generate a professional DJ track with extended and radio edit versions.

    Builds a complete production through the integration coordinator.
    Depending on the flags, a DJ Extended version (extended intro/outro for
    mixing) and/or a Radio Edit version (shorter, broadcast-ready) are
    produced, each drawing on a large slice of the sample library.

    Args:
        description: Natural language description of the desired track,
            e.g. "reggaeton perreo intenso 95bpm Am" or
            "trap oscuro 140bpm Cm, agresivo".
        tempo: Tempo in BPM (default 95).
        key: Musical key such as "Am", "Cm", "Gm" (default "Am").
        include_dj_extended: Whether to create the DJ Extended version.
        include_radio_edit: Whether to create the Radio Edit version.
        sample_count_target: Target sample count per version (default 330).

    Returns:
        JSON string with: status ("success"/"error"), per-version dicts
        (tracks, samples_used, duration_bars, version label) or None for
        versions not requested, total_samples_used, total_tracks,
        qa_scores per version, and a human-readable message.

    Example:
        generate_dj_professional_track(
            description="reggaeton perreo intenso 95bpm Am",
            tempo=95, key="Am",
            include_dj_extended=True, include_radio_edit=True,
            sample_count_target=330
        )
    """
    try:
        from engines.integration import get_integration_coordinator

        coordinator = get_integration_coordinator()
        result = coordinator.build_dj_professional_production(
            description=description,
            tempo=tempo,
            key=key,
            include_dj_extended=include_dj_extended,
            include_radio_edit=include_radio_edit,
            sample_count_target=sample_count_target
        )

        # One full sample budget is allocated per requested version.
        version_count = (1 if include_dj_extended else 0) + (1 if include_radio_edit else 0)
        total_samples = sample_count_target * version_count

        dj_extended_info = None
        if include_dj_extended:
            dj_extended_info = {
                "tracks": result.get("dj_extended_tracks", []),
                "samples_used": result.get("dj_extended_samples", []),
                "duration_bars": result.get("dj_extended_duration", 128),
                "version": "DJ Extended"
            }

        radio_edit_info = None
        if include_radio_edit:
            radio_edit_info = {
                "tracks": result.get("radio_edit_tracks", []),
                "samples_used": result.get("radio_edit_samples", []),
                "duration_bars": result.get("radio_edit_duration", 64),
                "version": "Radio Edit"
            }

        return _ok({
            "status": "success" if result.get("success", False) else "error",
            "dj_extended": dj_extended_info,
            "radio_edit": radio_edit_info,
            "total_samples_used": total_samples,
            "total_tracks": result.get("total_tracks", 21),
            "qa_scores": {
                "dj_extended": result.get("dj_extended_qa_score", 0.0),
                "radio_edit": result.get("radio_edit_qa_score", 0.0)
            },
            "message": result.get("message",
                                  f"DJ Professional track generated with {total_samples} samples across "
                                  f"{version_count} versions")
        })
    except ImportError:
        return _err("Integration coordinator not available. Ensure engines.integration module is installed.")
    except AttributeError:
        return _err("build_dj_professional_production method not yet implemented in integration coordinator.")
    except Exception as e:
        return _err(f"Error generating DJ professional track: {str(e)}")
# ==================================================================
# SPRINT 5.5: ADVANCED PRODUCTION TOOLS
# ==================================================================
@mcp.tool()
def inject_sample_batch(ctx: Context, samples: list, target_track: Optional[int] = None) -> str:
    """Inject up to 50 samples into the project with 10s timeout.

    Injects a batch of samples into the Arrangement View for rapid
    track building. Each sample is placed individually; failures are
    collected per sample and do not abort the rest of the batch.

    Args:
        samples: List of sample dicts with keys: path (file path),
            position (bars), track_index (optional), warp (optional,
            defaults to True).
        target_track: Default track index used when a sample dict does
            not carry its own track_index.

    Returns:
        JSON with counts of loaded/total samples, the list of successful
        placements, and per-sample errors (if any).

    Example:
        inject_sample_batch(
            samples=[
                {"path": "kick.wav", "position": 0, "track_index": 0},
                {"path": "snare.wav", "position": 0, "track_index": 1},
            ],
            target_track=0
        )
    """
    try:
        # Enforce batch size limits before touching Ableton.
        if len(samples) > 50:
            return _err(f"Too many samples: {len(samples)}. Maximum is 50 per batch.")
        if not samples:
            return _err("No samples provided. Provide at least one sample.")
        loaded = []   # successfully placed samples
        errors = []   # per-sample failures (batch continues on error)
        for i, sample in enumerate(samples):
            try:
                path = sample.get("path", "")
                position = sample.get("position", 0)
                # Per-sample track_index wins; fall back to the batch default.
                track_idx = sample.get("track_index", target_track)
                warp = sample.get("warp", True)
                # Validate the file exists locally before asking Ableton to load it.
                if not path or not os.path.isfile(path):
                    errors.append({"index": i, "error": f"File not found: {path}"})
                    continue
                if track_idx is None:
                    errors.append({"index": i, "error": "No track index specified"})
                    continue
                # Create audio clip in Arrangement (fixed 4-beat length).
                resp = _send_to_ableton(
                    "create_arrangement_audio_clip",
                    {
                        "track_index": track_idx,
                        "sample_path": path,
                        "start_time": position,
                        "length": 4.0,
                        "warp": warp
                    },
                    timeout=5.0
                )
                if resp.get("status") == "success":
                    loaded.append({
                        "index": i,
                        "path": path,
                        "track_index": track_idx,
                        "position": position
                    })
                else:
                    errors.append({
                        "index": i,
                        "error": resp.get("message", "Failed to load sample")
                    })
            except Exception as e:
                # Keep processing the remaining samples on unexpected errors.
                errors.append({"index": i, "error": str(e)})
        return _ok({
            "samples_loaded": len(loaded),
            "samples_total": len(samples),
            "loaded": loaded,
            "errors": errors,
            "error_count": len(errors)
        })
    except Exception as e:
        return _err(f"Error injecting sample batch: {str(e)}")
@mcp.tool()
def validate_coherence(ctx: Context, sample_paths: list, threshold: float = 0.85) -> str:
    """Validate sample compatibility with 15s timeout.

    Runs spectral/rhythmic compatibility analysis over the given samples
    and reports which pairs fall below the requested coherence threshold.

    Args:
        sample_paths: Paths of the sample files to compare (at least 2).
        threshold: Minimum acceptable coherence score (0.0-1.0).

    Returns:
        JSON string with the average coherence, per-pair results, the list
        of incompatible pairs, an is_compatible flag, and a recommendation.

    Example:
        validate_coherence(
            sample_paths=["kick.wav", "snare.wav", "bass.wav"],
            threshold=0.85
        )
    """
    try:
        from engines.real_coherence_validator import RealCoherenceValidator

        # Guard clauses: need at least one pair and a sane threshold.
        if len(sample_paths) < 2:
            return _err("Need at least 2 samples to validate coherence.")
        if not 0.0 <= threshold <= 1.0:
            return _err(f"Invalid threshold: {threshold}. Must be 0.0-1.0.")

        checker = RealCoherenceValidator()
        pair_reports = checker.validate_batch(sample_paths)

        # Overall score is the mean of all pairwise coherence scores.
        all_scores = [report.get("coherence_score", 0) for report in pair_reports]
        mean_score = sum(all_scores) / len(all_scores) if all_scores else 0

        # Any pair under the threshold is flagged as incompatible.
        below_threshold = [
            report for report in pair_reports
            if report.get("coherence_score", 0) < threshold
        ]
        fully_compatible = len(below_threshold) == 0

        if fully_compatible:
            advice = "All samples are compatible"
        else:
            advice = f"{len(below_threshold)} pairs below threshold"

        return _ok({
            "average_coherence": round(mean_score, 3),
            "threshold": threshold,
            "samples_validated": len(sample_paths),
            "pairwise_results": pair_reports,
            "incompatible_pairs": below_threshold,
            "is_compatible": fully_compatible,
            "recommendation": advice
        })
    except ImportError:
        return _err("Coherence validator engine not available.")
    except Exception as e:
        return _err(f"Error validating coherence: {str(e)}")
@mcp.tool()
def build_section_real(ctx: Context, section_type: str, at_bar: int,
                       duration_bars: int = 8, key: str = "Am",
                       bpm: float = 95, include_automation: bool = True) -> str:
    """Build section with automation (15s timeout).

    Creates a complete song section directly in Arrangement View with
    optional filter sweeps, volume automation, and FX.

    Args:
        section_type: Type of section - "intro", "verse", "chorus",
            "bridge", "outro", "build", "drop" (case-insensitive).
        at_bar: Starting bar position in the arrangement.
        duration_bars: Length of the section in bars (default 8).
        key: Musical key (default "Am").
        bpm: Tempo in BPM (default 95).
        include_automation: Add filter sweeps on up to 3 affected tracks.

    Returns:
        JSON with section creation status, tracks affected, and automation details.

    Example:
        build_section_real(
            section_type="chorus",
            at_bar=16,
            duration_bars=16,
            key="Am",
            include_automation=True
        )
    """
    try:
        # Normalize once. Fix: previously validation used section_type.lower()
        # but the automation branch compared the RAW string, so e.g. "Intro"
        # passed validation yet got the non-intro sweep frequencies.
        valid_types = ["intro", "verse", "chorus", "bridge", "outro", "build", "drop"]
        section_kind = section_type.lower()
        if section_kind not in valid_types:
            return _err(f"Invalid section_type: {section_type}. Must be one of: {', '.join(valid_types)}")
        # Create the section via Ableton
        resp = _send_to_ableton(
            "create_section_at_bar",
            {
                "section_type": section_kind,
                "at_bar": at_bar,
                "duration_bars": duration_bars,
                "key": key,
                "bpm": bpm
            },
            timeout=TIMEOUTS["build_section_real"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create section"))
        result = resp.get("result", {})
        # Apply automation if requested
        automation_applied = []
        if include_automation:
            tracks_affected = result.get("tracks_affected", [])
            for track_idx in tracks_affected[:3]:  # Limit to first 3 tracks
                try:
                    auto_resp = _send_to_ableton(
                        "automate_filter",
                        {
                            "track_index": track_idx,
                            "start_bar": at_bar,
                            "end_bar": at_bar + duration_bars,
                            # Intros sweep up from a darker start; choruses
                            # and drops open the filter fully.
                            "start_freq": 200 if section_kind == "intro" else 800,
                            "end_freq": 20000 if section_kind in ["chorus", "drop"] else 8000,
                            "curve_type": "s_curve"
                        },
                        timeout=5.0
                    )
                    if auto_resp.get("status") == "success":
                        automation_applied.append({
                            "track_index": track_idx,
                            "type": "filter_sweep"
                        })
                except Exception:
                    pass  # Best-effort: continue even if one automation fails
        return _ok({
            "section_type": section_type,
            "at_bar": at_bar,
            "duration_bars": duration_bars,
            "key": key,
            "bpm": bpm,
            "tracks_affected": result.get("tracks_affected", []),
            "automation_applied": automation_applied,
            "automation_count": len(automation_applied),
            "status": "created"
        })
    except Exception as e:
        return _err(f"Error building section: {str(e)}")
@mcp.tool()
def select_coherent_kit(ctx: Context, genre: str = "reggaeton",
                        sample_count: int = 12, coherence_threshold: float = 0.90,
                        key: str = "", bpm: float = 0) -> str:
    """Selects 12 coherent samples (20s timeout).

    Intelligently selects a kit of samples that work well together
    based on spectral analysis and genre matching.

    Args:
        genre: Genre for sample selection (default "reggaeton").
        sample_count: Number of samples to select (default 12, max 20).
        coherence_threshold: Minimum coherence between samples (0.0-1.0).
        key: Musical key filter (empty string disables the filter).
        bpm: BPM filter (0 or negative disables the filter).

    Returns:
        JSON with selected samples (role, name, path, bpm, key), batch
        coherence validation results, and the kit configuration.

    Example:
        select_coherent_kit(
            genre="reggaeton",
            sample_count=12,
            coherence_threshold=0.90,
            key="Am"
        )
    """
    try:
        from engines.sample_selector import get_selector
        from engines.real_coherence_validator import RealCoherenceValidator
        if not 1 <= sample_count <= 20:
            return _err(f"Invalid sample_count: {sample_count}. Must be 1-20.")
        if not 0.0 <= coherence_threshold <= 1.0:
            return _err(f"Invalid coherence_threshold: {coherence_threshold}. Must be 0.0-1.0.")
        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available. Check libreria path.")
        # Select samples for genre (None disables each optional filter).
        group = selector.select_for_genre(
            genre,
            key if key else None,
            bpm if bpm > 0 else None
        )
        # Collect all available samples as (role, sample) pairs.
        all_samples = []
        if group.drums:
            if group.drums.kick:
                all_samples.append(("kick", group.drums.kick))
            if group.drums.snare:
                all_samples.append(("snare", group.drums.snare))
            if group.drums.hat_closed:
                all_samples.append(("hat", group.drums.hat_closed))
            if group.drums.clap:
                all_samples.append(("clap", group.drums.clap))
        for bass in group.bass[:3]:
            all_samples.append(("bass", bass))
        for synth in group.synths[:3]:
            all_samples.append(("synth", synth))
        for fx in group.fx[:2]:
            all_samples.append(("fx", fx))
        # Limit to requested count
        selected = all_samples[:sample_count]
        # Validate coherence.
        # Fix: this previously instantiated the undefined name
        # `CoherenceValidator` (the imported class is RealCoherenceValidator),
        # which raised NameError on every call and was swallowed by the
        # generic except below as a misleading "Error selecting coherent kit".
        validator = RealCoherenceValidator()
        sample_paths = [s[1].path for s in selected if hasattr(s[1], 'path')]
        coherence_result = None
        if len(sample_paths) >= 2:
            coherence_result = validator.validate_batch(sample_paths)
        # Format result
        kit = {
            "genre": genre,
            "key": key or group.key,
            "bpm": bpm or group.bpm,
            "sample_count": len(selected),
            "samples": [
                {
                    "role": role,
                    "name": getattr(sample, 'name', str(sample)),
                    "path": getattr(sample, 'path', ''),
                    "bpm": getattr(sample, 'bpm', 0),
                    "key": getattr(sample, 'key', '')
                }
                for role, sample in selected
            ],
            "coherence_validation": coherence_result,
            "coherence_threshold": coherence_threshold
        }
        return _ok(kit)
    except ImportError:
        return _err("Sample selector or coherence engine not available.")
    except Exception as e:
        return _err(f"Error selecting coherent kit: {str(e)}")
@mcp.tool()
def produce_radio_edit_4min(ctx: Context, description: str,
                            tempo: int = 95, key: str = "Am",
                            target_duration_seconds: int = 240) -> str:
    """Full 4-min radio edit production (600s timeout).

    Runs the integration coordinator with a radio-friendly "short"
    structure: proper intro length, verse-chorus form, and a clean outro,
    aimed at the requested broadcast duration.

    Args:
        description: Natural language description of desired track.
        tempo: Tempo in BPM (default 95).
        key: Musical key (default "Am").
        target_duration_seconds: Target duration in seconds (default 240 = 4 min).

    Returns:
        JSON with production summary, tracks created, and render info.

    Example:
        produce_radio_edit_4min(
            description="reggaeton perreo intenso",
            tempo=95,
            key="Am"
        )
    """
    try:
        from engines.integration import get_integration_coordinator

        # Estimate arrangement length in bars from the requested duration
        # (4 beats per bar assumed).
        bars = int((target_duration_seconds / 60) * (tempo / 4))

        coordinator = get_integration_coordinator()
        build_report = coordinator.build_expansive_production(
            description=description,
            samples_per_category=8,
            variation_strategy="coherent",
            coherence_threshold=0.90,
            structure_type="short"  # Optimized for radio
        )

        payload = {
            "status": "success" if build_report.get("success") else "error",
            "description": description,
            "tempo": tempo,
            "key": key,
            "target_duration_seconds": target_duration_seconds,
            "estimated_bars": bars,
            "tracks_created": build_report.get("tracks_created", []),
            "samples_used": build_report.get("samples_used", {}),
            "coherence_scores": build_report.get("coherence_scores", {}),
            "qa_score": build_report.get("qa_score", 0.0),
            "message": f"Radio edit production completed: {target_duration_seconds}s track"
        }
        return _ok(payload)
    except ImportError:
        return _err("Integration coordinator not available.")
    except Exception as e:
        return _err(f"Error producing radio edit: {str(e)}")
@mcp.tool()
def get_production_progress(ctx: Context) -> str:
    """Gets production status (5s timeout).

    Reports the state of any in-progress production: workflow phase,
    completion percentage, task counts, elapsed time, plus a quick
    snapshot of the current Arrangement View contents.

    Returns:
        JSON with production status, progress percentage, and current phase.

    Example:
        get_production_progress()
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        report = WorkflowEngine().get_progress_report()

        # Augment the workflow report with a look at the arrangement.
        arrangement_data = {}
        arr_resp = _send_to_ableton("get_arrangement_clips", timeout=3.0)
        if arr_resp.get("status") == "success":
            arr_result = arr_resp.get("result", {})
            arrangement_data = {
                "total_clips": arr_result.get("total_clips", 0),
                "arrangement_length_beats": arr_result.get("arrangement_length_beats", 0)
            }

        completion = report.get("completion", 0)
        return _ok({
            "production_phase": report.get("current_phase", "unknown"),
            "completion_percent": completion,
            "tasks_done": report.get("tasks_done", 0),
            "tasks_total": report.get("tasks_total", 0),
            "time_invested": report.get("time_invested", "0h 0m"),
            "milestones": report.get("milestones", []),
            "arrangement": arrangement_data,
            "status": "in_progress" if completion < 100 else "complete"
        })
    except ImportError:
        return _err("Workflow engine not available.")
    except Exception as e:
        return _err(f"Error getting production progress: {str(e)}")
@mcp.tool()
def produce_with_spectral_coherence(ctx: Context,
                                    bpm: int = 100,
                                    key: str = "Am",
                                    style: str = "standard",
                                    coherence_threshold: float = 0.90,
                                    max_samples_per_role: int = 12,
                                    auto_record: bool = True) -> str:
    """
    Generate a professional song using spectrally coherent sample selection.

    Uses the 511 pre-analyzed samples to build a production where ALL the
    samples are spectrally coherent (same timbre, compatible energy).

    Args:
        bpm: Project tempo (default 100)
        key: Key (default Am)
        style: Production style (standard, minimal, trap, perreo)
        coherence_threshold: Minimum coherence score (0.0-1.0, default 0.90 professional)
        max_samples_per_role: How many samples to use per role (default 12)
        auto_record: Record to Arrangement View automatically.
            NOTE(review): this flag is never read in the body — confirm
            whether arrangement recording was meant to be triggered here.

    Returns:
        JSON with production details, per-role coherence, and samples used.
    """
    import sqlite3
    import numpy as np
    import pickle  # NOTE(review): unused in this function
    from pathlib import Path  # NOTE(review): unused in this function

    # Hard-coded paths to the pre-analyzed sample database and library root.
    DB_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton\sample_metadata.db"
    LIBRARY_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton"
    try:
        # Connect to the database holding pre-computed spectral features.
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        # Verify the library analysis has been run (the table has rows).
        cursor.execute("SELECT COUNT(*) FROM samples")
        total_samples = cursor.fetchone()[0]
        if total_samples == 0:
            return _err("Database vacia. Ejecutar analisis de libreria primero.")
        logger.info(f"[SPECTRAL] {total_samples} samples disponibles en base de datos")
        # Map instrument roles to library category name fragments
        # (matched via SQL LIKE, so partial names are fine).
        ROLE_CATEGORIES = {
            "kick": ["kick", "kicks", "8. KICKS", "kicks"],
            "snare": ["snare", "snares", "9. SNARE", "snares"],
            "hihat": ["hi-hat", "hi_hat", "hihats", "hat", "hats"],
            "perc": ["perc", "percs", "perc loop", "10. PERCS", "PERC"],
            "bass": ["bass", "basses", "Bass", "BASS", "reese"],
            "drumloop": ["drumloop", "drumloops", "4. DRUM LOOPS", "LATINOS - DRUM LOOPS"],
            "oneshot": ["oneshot", "oneshots", "3. ONE SHOTS", "LATINOS - ONE SHOTS", "20 One Shots"],
            "fx": ["fx", "FX", "5. FX", "transicion"],
            "vocal": ["vocal", "vocals", "11. VOCALS", "20 Vocals Phrases"],
            "pad": ["pad", "pads", "PAD"],
            "lead": ["lead", "leads", "LEAD"]
        }

        def get_samples_for_role(role, min_coherence=0.85):
            """Greedily select spectrally coherent samples for one role."""
            categories = ROLE_CATEGORIES.get(role, [role])
            # Fetch candidate samples for all of the role's categories.
            samples = []
            for cat in categories:
                cursor.execute("""
                    SELECT s.path, s.bpm, s.key, s.duration, s.rms,
                        s.spectral_centroid, s.spectral_rolloff, s.zero_crossing_rate,
                        s.mfcc_1, s.mfcc_2, s.mfcc_3, s.mfcc_4, s.mfcc_5,
                        s.mfcc_6, s.mfcc_7, s.mfcc_8, s.mfcc_9, s.mfcc_10,
                        s.mfcc_11, s.mfcc_12, s.mfcc_13,
                        sb.embedding, sb.spectral_features
                    FROM samples s
                    JOIN samples_bpm sb ON s.path = sb.path
                    WHERE s.category LIKE ?
                    AND s.duration > 0
                    ORDER BY s.duration DESC
                """, (f"%{cat}%",))
                for row in cursor.fetchall():
                    # Fall back to project defaults where metadata is missing.
                    samples.append({
                        'path': row[0],
                        'bpm': row[1] or bpm,
                        'key': row[2] or key,
                        'duration': row[3],
                        'rms': row[4] or -20,
                        'spectral_centroid': row[5] or 2000,
                        'spectral_rolloff': row[6] or 4000,
                        'zcr': row[7] or 0.1,
                        'mfccs': list(row[8:21]),
                        'embedding': row[21],
                        'spectral_features': row[22]
                    })
            if len(samples) < 2:
                logger.warning(f"[SPECTRAL] Pocos samples para rol {role}: {len(samples)}")
                return samples[:max_samples_per_role]
            # Greedy selection: keep a candidate only if its average
            # coherence with everything already selected is high enough.
            selected = [samples[0]]  # Seed with the first (longest) sample
            for candidate in samples[1:]:
                if len(selected) >= max_samples_per_role:
                    break
                # Average coherence against the already-selected set
                coherence_scores = []
                for selected_sample in selected:
                    score = calculate_coherence(candidate, selected_sample)
                    coherence_scores.append(score)
                avg_coherence = np.mean(coherence_scores) if coherence_scores else 0
                if avg_coherence >= min_coherence:
                    selected.append(candidate)
                    logger.debug(f"[SPECTRAL] {role}: {candidate['path'][:30]}... coherencia={avg_coherence:.3f}")
            logger.info(f"[SPECTRAL] Rol {role}: {len(selected)} samples seleccionados (coherencia >= {min_coherence})")
            return selected

        def calculate_coherence(s1, s2):
            """Compute coherence between two samples from pre-computed features.

            Weighted sum of four similarity terms; result lies in [0, 1].
            """
            scores = []
            # 1. Timbre similarity (MFCC) - 40%
            mfcc_sim = cosine_similarity(s1['mfccs'], s2['mfccs'])
            scores.append(mfcc_sim * 0.40)
            # 2. Spectral compatibility (relative centroid distance) - 30%
            centroid_diff = abs(s1['spectral_centroid'] - s2['spectral_centroid']) / max(s1['spectral_centroid'], 1)
            centroid_sim = max(0, 1 - centroid_diff)
            scores.append(centroid_sim * 0.30)
            # 3. Energy balance (RMS) - 20%
            rms_diff = abs(s1['rms'] - s2['rms']) / 60  # Normalize (assumes dB-like range — TODO confirm)
            rms_sim = max(0, 1 - rms_diff)
            scores.append(rms_sim * 0.20)
            # 4. Zero-crossing-rate compatibility - 10%
            zcr_sim = 1 - min(1, abs(s1['zcr'] - s2['zcr']) * 10)
            scores.append(zcr_sim * 0.10)
            return sum(scores)

        def cosine_similarity(v1, v2):
            """Cosine similarity of two vectors; 0.0 on any failure."""
            try:
                v1_arr = np.array(v1)
                v2_arr = np.array(v2)
                dot = np.dot(v1_arr, v2_arr)
                norm = np.linalg.norm(v1_arr) * np.linalg.norm(v2_arr)
                return float(dot / norm) if norm > 0 else 0.0
            except:
                return 0.0

        # Select coherent samples per role
        logger.info("[SPECTRAL] Iniciando seleccion coherente...")
        selected_kits = {}
        coherence_scores = {}
        for role in ["kick", "snare", "hihat", "perc", "bass", "drumloop", "oneshot", "fx"]:
            samples = get_samples_for_role(role, min_coherence=coherence_threshold)
            selected_kits[role] = samples
            # Average pairwise coherence for this role (reporting only)
            if len(samples) >= 2:
                pairwise_scores = []
                for i in range(len(samples)):
                    for j in range(i+1, len(samples)):
                        score = calculate_coherence(samples[i], samples[j])
                        pairwise_scores.append(score)
                avg_coherence = np.mean(pairwise_scores) if pairwise_scores else 0
            else:
                avg_coherence = 0.85  # Default when only one sample is available
            coherence_scores[role] = round(avg_coherence, 3)
        # Coherence report
        overall_coherence = np.mean(list(coherence_scores.values()))
        logger.info(f"[SPECTRAL] Coherencia general: {overall_coherence:.3f}")
        # Now build the production with the selected samples
        tracks_created = []
        samples_loaded = []
        # Create tracks and load the coherent samples
        for role_idx, (role, samples) in enumerate(selected_kits.items()):
            if not samples:
                continue
            # Create track (index -1 appends at the end)
            track_result = _send_to_ableton(
                "create_audio_track",
                {"index": -1},
                timeout=TIMEOUTS["create_audio_track"]
            )
            if track_result.get("status") != "success":
                continue
            track_index = track_result["result"]["track_index"]
            # Rename the track after its role
            _send_to_ableton(
                "set_track_name",
                {"track_index": track_index, "name": f"{role.title()} Spectral"},
                timeout=10.0
            )
            # Load coherent samples into session clip slots
            for slot_idx, sample in enumerate(samples[:8]):  # Max 8 slots
                sample_path = os.path.join(LIBRARY_PATH, sample['path'])
                if os.path.exists(sample_path):
                    load_result = _send_to_ableton(
                        "load_sample_to_clip",
                        {"track_index": track_index, "clip_index": slot_idx, "sample_path": sample_path},
                        timeout=TIMEOUTS["load_sample_to_clip"]
                    )
                    if load_result.get("status") == "success":
                        samples_loaded.append({
                            "role": role,
                            "track": track_index,
                            "slot": slot_idx,
                            "path": sample['path'],
                            "bpm": sample['bpm'],
                            "key": sample['key'],
                            "duration": sample['duration']
                        })
            tracks_created.append({
                "role": role,
                "track_index": track_index,
                "samples_count": len([s for s in samples_loaded if s['role'] == role])
            })
        conn.close()
        # Fire the first clip of each populated track for audition
        for track_info in tracks_created:
            if track_info['samples_count'] > 0:
                _send_to_ableton(
                    "fire_clip",
                    {"track_index": track_info['track_index'], "clip_index": 0},
                    timeout=10.0
                )
        # Start playback
        _send_to_ableton("start_playback", {}, timeout=10.0)
        return _ok({
            "status": "success",
            "message": "Produccion profesional con coherencia espectral creada",
            "total_samples_analyzed": total_samples,
            "samples_used": len(samples_loaded),
            "tracks_created": len(tracks_created),
            "coherence_threshold": coherence_threshold,
            "coherence_scores_by_role": coherence_scores,
            "overall_coherence": round(overall_coherence, 3),
            "is_professional": overall_coherence >= 0.90,
            "tracks": tracks_created,
            "samples": samples_loaded[:20],  # First 20 for preview
            "project_bpm": bpm,
            "project_key": key,
            "style": style
        })
    except Exception as e:
        logger.error(f"[SPECTRAL] Error: {str(e)}")
        return _err(f"Error en produccion espectral: {str(e)}")
# ------------------------------------------------------------------
# MAIN
# ------------------------------------------------------------------
if __name__ == "__main__":
    # Start the FastMCP server; blocks until the process is stopped.
    mcp.run()