# Changelog (last change):
# - Add 5 new MCP tools to server.py:
#     * create_riser (T031)      - Pre-drop buildup effect
#     * create_downlifter (T032) - Post-drop energy release
#     * create_impact (T033)     - Hit, crash, sub_drop, noise impacts
#     * create_silence (T034)    - Break/silence effects
#     * create_fx_section (T035) - Complete FX sections
# - Add 5 handlers to __init__.py for Remote Script execution
# - Update skill_produccion_audio.md with FX tools documentation
# All tools exposed and ready for professional FX generation.
# Closes Agente 1 of 20 - FX Creator implementation
"""
|
||
AbletonMCP_AI MCP Server - Clean FastMCP server for Ableton Live 12.
|
||
Communicates with the Ableton Remote Script via TCP socket on port 9877.
|
||
"""
|
||
import json
|
||
import logging
|
||
import os
|
||
import socket
|
||
import sys
|
||
import time
|
||
from contextlib import asynccontextmanager
|
||
from pathlib import Path
|
||
from typing import Optional
|
||
|
||
from mcp.server.fastmcp import FastMCP, Context
|
||
|
||
# ------------------------------------------------------------------
# Paths
# ------------------------------------------------------------------
MCP_DIR = Path(__file__).resolve().parent   # AbletonMCP_AI/mcp
PROJECT_DIR = MCP_DIR.parent                # AbletonMCP_AI
BASE_DIR = PROJECT_DIR.parent               # MIDI Remote Scripts root
ENGINE_DIR = MCP_DIR / "engines"

# Make the engine/package dirs importable without installing anything.
for p in (str(ENGINE_DIR), str(MCP_DIR), str(PROJECT_DIR)):
    if p not in sys.path:
        sys.path.insert(0, p)
||
# ------------------------------------------------------------------
# Logging
# ------------------------------------------------------------------
LOG_FORMAT = "%(asctime)s [%(name)s] %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logger = logging.getLogger("AbletonMCP-AI")
||
# ------------------------------------------------------------------
# Ableton TCP connection
# ------------------------------------------------------------------
# The Remote Script listens on localhost only; each request and response
# is a single JSON object terminated by a newline.
ABLETON_HOST = "127.0.0.1"
ABLETON_PORT = 9877
TERMINATOR = b"\n"
||
# Tool timeouts (seconds), keyed by remote command name.
# Quick reads are 5s, simple mutations 10-15s, generation/render work
# 60-600s. Fix: the dict previously declared "create_arrangement_audio_clip"
# twice (20.0 then 30.0); the first entry was dead (last key wins), so it is
# removed and the effective 30.0 value kept.
TIMEOUTS = {
    "get_session_info": 5.0,
    "get_tracks": 5.0,
    "get_scenes": 5.0,
    "get_master_info": 5.0,
    "set_tempo": 10.0,
    "start_playback": 10.0,
    "stop_playback": 10.0,
    "toggle_playback": 10.0,
    "stop_all_clips": 10.0,
    "create_midi_track": 15.0,
    "create_audio_track": 15.0,
    "set_track_name": 10.0,
    "set_track_volume": 10.0,
    "set_track_pan": 10.0,
    "set_track_mute": 10.0,
    "set_track_solo": 10.0,
    "set_master_volume": 10.0,
    "create_clip": 15.0,
    "add_notes_to_clip": 15.0,
    "fire_clip": 10.0,
    "fire_scene": 10.0,
    "set_scene_name": 10.0,
    "create_scene": 15.0,
    "set_metronome": 10.0,
    "set_loop": 10.0,
    "set_signature": 10.0,
    "create_arrangement_audio_pattern": 30.0,
    "load_sample_to_drum_rack": 30.0,
    "generate_track": 300.0,
    "generate_song": 300.0,
    "select_samples_for_genre": 30.0,
    # Sprint 2 - Phase 1 & 2: Advanced Production Tools
    "generate_complete_reggaeton": 60.0,
    "generate_from_reference": 60.0,
    "load_sample_to_clip": 15.0,
    "set_warp_markers": 15.0,
    "reverse_clip": 10.0,
    "pitch_shift_clip": 15.0,
    "time_stretch_clip": 15.0,
    "slice_clip": 20.0,
    # Fase 3: Mixing & Effects
    "create_bus_track": 15.0,
    "route_track_to_bus": 10.0,
    "create_return_track": 15.0,
    "set_track_send": 10.0,
    "insert_device": 15.0,
    "configure_eq": 15.0,
    "configure_compressor": 15.0,
    "setup_sidechain": 15.0,
    "auto_gain_staging": 20.0,
    "apply_master_chain": 20.0,
    # Fase 4: Workflow & Export
    "export_project": 60.0,
    "get_project_summary": 10.0,
    "suggest_improvements": 15.0,
    "validate_project": 15.0,
    "humanize_track": 15.0,
    # Phase 1 & 2 - Bridge Engines to Ableton (T001-T040)
    "produce_reggaeton": 300.0,
    "produce_from_reference": 300.0,
    "produce_arrangement": 300.0,
    "complete_production": 300.0,
    "batch_produce": 600.0,
    "generate_midi_clip": 30.0,
    "generate_dembow_clip": 30.0,
    "generate_bass_clip": 30.0,
    "generate_chords_clip": 30.0,
    "generate_melody_clip": 30.0,
    "create_drum_kit": 30.0,
    "build_track_from_samples": 60.0,
    "generate_track_from_config": 120.0,
    "generate_section": 60.0,
    "apply_human_feel": 30.0,
    "add_percussion_fills": 30.0,
    # Phase 2 - Arrangement & Automation
    "build_arrangement_structure": 60.0,
    "create_arrangement_midi_clip": 30.0,
    "create_arrangement_audio_clip": 30.0,
    "fill_arrangement_with_song": 300.0,
    "automate_filter": 30.0,
    # Musical intelligence / workflow / quality
    "analyze_project_key": 20.0,
    "harmonize_track": 30.0,
    "generate_counter_melody": 30.0,
    "detect_energy_curve": 20.0,
    "balance_sections": 20.0,
    "variate_loop": 30.0,
    "add_call_and_response": 30.0,
    "generate_breakdown": 30.0,
    "generate_drop_variation": 30.0,
    "create_outro": 30.0,
    "render_stems": 120.0,
    "render_full_mix": 120.0,
    "render_instrumental": 120.0,
    "full_quality_check": 30.0,
    "fix_quality_issues": 60.0,
    "duplicate_project": 30.0,
    # Intelligent Track Generation (T200+)
    "generate_intelligent_track": 300.0,
    "create_radio_edit": 60.0,
    "create_dj_edit": 60.0,
    "undo": 10.0,
    "redo": 10.0,
    "save_checkpoint": 20.0,
    "health_check": 10.0,
}
||
def _send_to_ableton(cmd_type: str, params: dict = None, timeout: float = 15.0) -> dict:
    """Send one JSON command to the Ableton Remote Script and return its reply.

    Opens a fresh TCP connection per command, writes a newline-terminated
    JSON envelope, and reads until the newline terminator arrives. All
    failures are converted into an error-shaped dict rather than raised.
    """
    conn = None
    try:
        conn = socket.create_connection((ABLETON_HOST, ABLETON_PORT), timeout=timeout)
        conn.settimeout(timeout)

        envelope = {"type": cmd_type, "params": params or {}}
        conn.sendall((json.dumps(envelope) + "\n").encode("utf-8"))

        received = b""
        while TERMINATOR not in received:
            piece = conn.recv(65536)
            if not piece:
                # Peer closed the socket before a full line arrived.
                return {"status": "error", "message": "No response terminator received"}
            received += piece

        line, _, _ = received.partition(TERMINATOR)
        return json.loads(line.decode("utf-8"))
    except socket.timeout:
        return {"status": "error", "message": f"Command '{cmd_type}' timed out after {timeout}s"}
    except ConnectionRefusedError:
        return {"status": "error", "message": f"Cannot connect to Ableton on {ABLETON_HOST}:{ABLETON_PORT}. Is the Remote Script loaded?"}
    except Exception as e:
        return {"status": "error", "message": str(e)}
    finally:
        if conn:
            try:
                conn.close()
            except Exception:
                pass
||
def _ok(data: dict) -> str:
|
||
return json.dumps({"status": "success", "result": data}, indent=2)
|
||
|
||
|
||
def _err(msg: str) -> str:
|
||
return json.dumps({"status": "error", "message": msg}, indent=2)
|
||
|
||
|
||
def _ableton_result(resp: dict) -> dict:
|
||
"""Return the nested Ableton payload when present."""
|
||
result = resp.get("result", {})
|
||
return result if isinstance(result, dict) else {}
|
||
|
||
|
||
def _proxy_ableton_command(cmd_type: str, params: dict = None, timeout: Optional[float] = None,
                           defaults: dict = None) -> str:
    """Execute a TCP command against Ableton and wrap the nested result.

    *defaults* seeds the payload; keys returned by Ableton override it.
    When *timeout* is falsy the per-command TIMEOUTS entry (or 15s) is used.
    """
    effective_timeout = timeout or TIMEOUTS.get(cmd_type, 15.0)
    resp = _send_to_ableton(cmd_type, params or {}, timeout=effective_timeout)
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))

    merged = dict(defaults or {})
    merged.update(_ableton_result(resp))
    return _ok(merged)
||
def _warm_engine_imports() -> None:
    """Preload heavy engine modules before the first MCP tool call.

    Some lazy imports behave fine in direct Python calls but stall badly when
    they happen inside a live stdio CallToolRequest. Importing and
    instantiating the heavy workflow engines once at startup keeps that cost
    off the request path and avoids false MCP timeouts.
    """
    targets = (
        ("engines.production_workflow", "ProductionWorkflow"),
        ("engines.workflow_engine", "WorkflowEngine"),
        ("engines.musical_intelligence", "MusicalIntelligenceEngine"),
    )
    for module_name, class_name in targets:
        try:
            module = __import__(module_name, fromlist=[class_name])
            getattr(module, class_name)()  # instantiate once to warm caches
            logger.info("Warm preload ready: %s", class_name)
        except Exception:
            # Best-effort: a failed preload is logged, never fatal.
            logger.exception("Warm preload failed: %s", class_name)
||
# ------------------------------------------------------------------
|
||
# Lifespan / startup
|
||
# ------------------------------------------------------------------
|
||
@asynccontextmanager
async def server_lifespan(server: FastMCP):
    """Startup/shutdown hook for the FastMCP server.

    Warms heavy engine imports, then makes a best-effort probe of the
    Ableton Remote Script so connectivity problems are logged early.
    The probe never blocks or aborts startup.
    """
    logger.info("AbletonMCP-AI Server starting...")
    _warm_engine_imports()
    # Non-blocking: try to connect to Ableton but don't block startup if unavailable
    try:
        sock = socket.create_connection((ABLETON_HOST, ABLETON_PORT), timeout=2.0)
        sock.settimeout(2.0)
        msg = json.dumps({"type": "get_session_info", "params": {}}) + "\n"
        sock.sendall(msg.encode("utf-8"))
        buf = b""
        sock.settimeout(3.0)
        try:
            # Read until the newline terminator or the peer closes the socket.
            while TERMINATOR not in buf:
                chunk = sock.recv(4096)
                if not chunk:
                    break
                buf += chunk
            if TERMINATOR in buf:
                raw = buf.split(TERMINATOR)[0]
                info = json.loads(raw.decode("utf-8"))
                r = info.get("result", {})
                logger.info("Connected to Ableton Live: %d BPM, %d tracks",
                            r.get("tempo", 0), r.get("num_tracks", 0))
        except Exception:
            # Probe is informational only; a malformed reply is not fatal.
            logger.warning("Ableton connected but session info unavailable")
        sock.close()
    except ConnectionRefusedError:
        logger.warning("Ableton Live not reachable on %s:%d. Load AbletonMCP_AI as Control Surface.", ABLETON_HOST, ABLETON_PORT)
    except Exception as e:
        logger.warning("Ableton connection check failed: %s", str(e))
    # Server handles requests while suspended at this yield.
    yield
    logger.info("AbletonMCP-AI Server shutting down")
||
|
||
mcp = FastMCP("Ableton Live MCP", lifespan=server_lifespan)
|
||
|
||
|
||
# ==================================================================
|
||
# DEBUG - No dependencies, always works
|
||
# ==================================================================
|
||
@mcp.tool()
def ping(ctx: Context) -> str:
    """Simple ping test. Use this to verify MCP connectivity without needing Ableton."""
    manager = getattr(mcp, "_tool_manager", None)
    registered = getattr(manager, "_tools", {})
    return json.dumps({"status": "ok", "message": "pong", "tools": len(registered)})
||
# ==================================================================
|
||
# INFO TOOLS
|
||
# ==================================================================
|
||
@mcp.tool()
def get_session_info(ctx: Context) -> str:
    """Get current Ableton Live session information."""
    resp = _send_to_ableton("get_session_info", timeout=TIMEOUTS["get_session_info"])
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))
    r = resp["result"]
    # Project only the documented session fields, in a stable order.
    fields = ("tempo", "num_tracks", "num_scenes", "is_playing",
              "current_song_time", "metronome", "master_volume")
    return _ok({name: r.get(name) for name in fields})
|
||
@mcp.tool()
def get_tracks(ctx: Context) -> str:
    """Get list of all tracks in the current project."""
    resp = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))
    return _ok(resp.get("result", {}))
|
||
@mcp.tool()
def get_scenes(ctx: Context) -> str:
    """Get list of all scenes."""
    resp = _send_to_ableton("get_scenes", timeout=TIMEOUTS["get_scenes"])
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))
    return _ok(resp.get("result", {}))
||
@mcp.tool()
def get_arrangement_clips(ctx: Context, track_index: Optional[int] = None) -> str:
    """Read all clips currently placed in Arrangement View.

    Use this to understand the current song structure — which clips exist,
    where they start, how long they are, and which tracks they're on.

    Essential for understanding a project before modifying it.

    Args:
        track_index: Optional. If provided, only returns clips for that track.
                     If omitted, returns clips for all tracks.

    Returns:
        - clips: list with track_index, track_name, name, start_time (beats),
          end_time, length, is_midi, color, muted, looping
        - total_clips: total count
        - arrangement_length_beats: total song length in beats
        - unique_start_positions: sorted list of clip start points (bar map)
    """
    # Fix: annotate as Optional[int] — the default is None, so a bare `int`
    # annotation was wrong (and misleads FastMCP's schema generation).
    params = {}
    if track_index is not None:
        params["track_index"] = track_index
    return _proxy_ableton_command("get_arrangement_clips", params, timeout=30.0)
||
@mcp.tool()
def get_master_info(ctx: Context) -> str:
    """Get master track information."""
    resp = _send_to_ableton("get_master_info", timeout=TIMEOUTS["get_master_info"])
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))
    return _ok(resp.get("result", {}))
||
@mcp.tool()
def health_check(ctx: Context) -> str:
    """T050: Run a comprehensive health check of the AbletonMCP_AI system.

    Runs 5 checks:
    1. TCP server connection
    2. Song accessibility
    3. Tracks accessibility
    4. Browser accessibility
    5. update_display drain loop active

    Returns a score 0-5 with detailed status for each check.
    This should be the first command run after opening Ableton.
    """
    resp = _send_to_ableton("health_check", timeout=TIMEOUTS["health_check"])
    if resp.get("status") != "success":
        return _err(resp.get("message", "Unknown error"))

    r = resp.get("result", {})
    # One human-readable line per check, e.g. " [OK] tcp: listening".
    summary = [
        " [%s] %s: %s" % ("OK" if c.get("passed") else "FAIL",
                          c.get("name", "?"), c.get("detail", ""))
        for c in r.get("checks", [])
    ]
    return _ok({
        "score": "%d/5" % r.get("score", 0),
        "status": r.get("status", "UNKNOWN"),
        "checks": summary,
        "recommendation": r.get("recommendation", ""),
    })
||
# ==================================================================
|
||
# TRANSPORT
|
||
# ==================================================================
|
||
@mcp.tool()
def start_playback(ctx: Context) -> str:
    """Start playback in Ableton Live."""
    resp = _send_to_ableton("start_playback", timeout=TIMEOUTS["start_playback"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def stop_playback(ctx: Context) -> str:
    """Stop playback in Ableton Live."""
    resp = _send_to_ableton("stop_playback", timeout=TIMEOUTS["stop_playback"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def toggle_playback(ctx: Context) -> str:
    """Toggle playback (start if stopped, stop if playing)."""
    resp = _send_to_ableton("toggle_playback", timeout=TIMEOUTS["toggle_playback"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def stop_all_clips(ctx: Context) -> str:
    """Stop all clips in Session View."""
    resp = _send_to_ableton("stop_all_clips", timeout=TIMEOUTS["stop_all_clips"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
# ==================================================================
|
||
# PROJECT SETTINGS
|
||
# ==================================================================
|
||
@mcp.tool()
def set_tempo(ctx: Context, tempo: float) -> str:
    """Set the project tempo in BPM (valid range 20-300)."""
    if not 20 <= tempo <= 300:
        return _err(f"Invalid tempo: {tempo}. Must be 20-300 BPM.")
    resp = _send_to_ableton("set_tempo", {"tempo": tempo}, timeout=TIMEOUTS["set_tempo"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_time_signature(ctx: Context, numerator: int = 4, denominator: int = 4) -> str:
    """Set the project time signature (defaults to 4/4)."""
    resp = _send_to_ableton("set_signature", {"numerator": numerator, "denominator": denominator},
                            timeout=TIMEOUTS["set_signature"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_metronome(ctx: Context, enabled: bool) -> str:
    """Enable or disable the metronome."""
    resp = _send_to_ableton("set_metronome", {"enabled": enabled}, timeout=TIMEOUTS["set_metronome"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
# ==================================================================
|
||
# TRACKS
|
||
# ==================================================================
|
||
@mcp.tool()
def create_midi_track(ctx: Context, index: int = -1) -> str:
    """Create a new MIDI track. index=-1 appends at the end."""
    resp = _send_to_ableton("create_midi_track", {"index": index}, timeout=TIMEOUTS["create_midi_track"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def create_audio_track(ctx: Context, index: int = -1) -> str:
    """Create a new audio track. index=-1 appends at the end."""
    resp = _send_to_ableton("create_audio_track", {"index": index}, timeout=TIMEOUTS["create_audio_track"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_track_name(ctx: Context, track_index: int, name: str) -> str:
    """Set the name of a track."""
    resp = _send_to_ableton("set_track_name", {"track_index": track_index, "name": name},
                            timeout=TIMEOUTS["set_track_name"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_track_volume(ctx: Context, track_index: int, volume: float) -> str:
    """Set track volume (0.0 - 1.0)."""
    if not 0.0 <= volume <= 1.0:
        return _err(f"Invalid volume: {volume}. Must be 0.0-1.0.")
    resp = _send_to_ableton("set_track_volume", {"track_index": track_index, "volume": volume},
                            timeout=TIMEOUTS["set_track_volume"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_track_pan(ctx: Context, track_index: int, pan: float) -> str:
    """Set track pan (-1.0 left to 1.0 right)."""
    if not -1.0 <= pan <= 1.0:
        return _err(f"Invalid pan: {pan}. Must be -1.0 to 1.0.")
    resp = _send_to_ableton("set_track_pan", {"track_index": track_index, "pan": pan},
                            timeout=TIMEOUTS["set_track_pan"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_track_mute(ctx: Context, track_index: int, mute: bool) -> str:
    """Mute or unmute a track."""
    resp = _send_to_ableton("set_track_mute", {"track_index": track_index, "mute": mute},
                            timeout=TIMEOUTS["set_track_mute"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_track_solo(ctx: Context, track_index: int, solo: bool) -> str:
    """Solo or unsolo a track."""
    resp = _send_to_ableton("set_track_solo", {"track_index": track_index, "solo": solo},
                            timeout=TIMEOUTS["set_track_solo"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_master_volume(ctx: Context, volume: float) -> str:
    """Set master track volume (0.0 - 1.0)."""
    if not 0.0 <= volume <= 1.0:
        return _err(f"Invalid volume: {volume}. Must be 0.0-1.0.")
    resp = _send_to_ableton("set_master_volume", {"volume": volume}, timeout=TIMEOUTS["set_master_volume"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
# ==================================================================
|
||
# CLIPS & SESSION VIEW
|
||
# ==================================================================
|
||
@mcp.tool()
def create_clip(ctx: Context, track_index: int, clip_index: int = 0, length: float = 4.0) -> str:
    """Create a MIDI clip in Session View (length in beats)."""
    resp = _send_to_ableton("create_clip", {"track_index": track_index, "clip_index": clip_index, "length": length},
                            timeout=TIMEOUTS["create_clip"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def add_notes_to_clip(ctx: Context, track_index: int, clip_index: int, notes: list) -> str:
    """Add MIDI notes to a clip. notes is a list of dicts with keys: pitch, start_time, duration, velocity."""
    resp = _send_to_ableton("add_notes_to_clip",
                            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
                            timeout=TIMEOUTS["add_notes_to_clip"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def fire_clip(ctx: Context, track_index: int, clip_index: int = 0) -> str:
    """Fire a clip in Session View."""
    resp = _send_to_ableton("fire_clip", {"track_index": track_index, "clip_index": clip_index},
                            timeout=TIMEOUTS["fire_clip"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def fire_scene(ctx: Context, scene_index: int) -> str:
    """Fire a scene in Session View."""
    resp = _send_to_ableton("fire_scene", {"scene_index": scene_index}, timeout=TIMEOUTS["fire_scene"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def set_scene_name(ctx: Context, scene_index: int, name: str) -> str:
    """Set the name of a scene."""
    resp = _send_to_ableton("set_scene_name", {"scene_index": scene_index, "name": name},
                            timeout=TIMEOUTS["set_scene_name"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def create_scene(ctx: Context, index: int = -1) -> str:
    """Create a new scene. index=-1 appends at the end."""
    resp = _send_to_ableton("create_scene", {"index": index}, timeout=TIMEOUTS["create_scene"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
# ==================================================================
|
||
# ARRANGEMENT VIEW - Audio clips
|
||
# ==================================================================
|
||
@mcp.tool()
def create_arrangement_audio_pattern(ctx: Context, track_index: int, file_path: str,
                                     positions: list = None, name: str = "") -> str:
    """Create audio clips in Arrangement View from a .wav file.

    Args:
        track_index: Target audio track.
        file_path: Path to the audio file to place.
        positions: Beat positions for each clip copy; defaults to [0].
        name: Optional clip name.
    """
    if positions is None:
        positions = [0]
    resp = _send_to_ableton("create_arrangement_audio_pattern",
                            {"track_index": track_index, "file_path": file_path,
                             "positions": positions, "name": name},
                            timeout=TIMEOUTS["create_arrangement_audio_pattern"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
# ==================================================================
|
||
# GENERATION & SAMPLE SELECTION
|
||
# ==================================================================
|
||
@mcp.tool()
def generate_track(ctx: Context, genre: str, style: str = "", bpm: float = 0,
                   key: str = "", structure: str = "standard") -> str:
    """Generate a track using AI."""
    resp = _send_to_ableton("generate_track",
                            {"genre": genre, "style": style, "bpm": bpm, "key": key, "structure": structure},
                            timeout=TIMEOUTS["generate_track"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def generate_song(ctx: Context, genre: str, style: str = "", bpm: float = 0,
                  key: str = "", structure: str = "standard") -> str:
    """Generate a complete song.

    NOTE(review): this proxies the same "generate_track" remote command as
    generate_track (only the TIMEOUTS key differs) — confirm the Remote
    Script has no separate "generate_song" handler before changing this.
    """
    resp = _send_to_ableton("generate_track",
                            {"genre": genre, "style": style, "bpm": bpm, "key": key, "structure": structure},
                            timeout=TIMEOUTS["generate_song"])
    # Consistency fix: default the message so _err never renders null.
    return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message", "Unknown error"))
||
@mcp.tool()
def select_samples_for_genre(ctx: Context, genre: str, key: str = "", bpm: float = 0) -> str:
    """Select samples for a genre from the local library.

    Args:
        genre: Target genre (e.g. "reggaeton").
        key: Optional musical key filter; empty string means "any".
        bpm: Optional BPM filter; 0 or negative means "any".

    Returns:
        JSON with the chosen drum kit pieces plus short lists of bass,
        synth, and FX sample names.
    """
    try:
        # Fix: the previously imported SampleSelector class was unused.
        from engines.sample_selector import get_selector

        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available. Check libreria/reggaeton path.")
        group = selector.select_for_genre(genre, key if key else None, bpm if bpm > 0 else None)

        # Report only the kit pieces that were actually found.
        kit = group.drums
        drums = {}
        for piece in ("kick", "snare", "clap", "hat_closed", "hat_open"):
            sample = getattr(kit, piece, None)
            if sample:
                drums[piece] = sample.name

        return _ok({
            "genre": group.genre,
            "key": group.key,
            "bpm": group.bpm,
            "drums": drums,
            "bass": [s.name for s in (group.bass or [])[:5]],
            "synths": [s.name for s in (group.synths or [])[:5]],
            "fx": [s.name for s in (group.fx or [])[:3]],
        })
    except ImportError:
        return _err("Sample selector engine not available.")
    except Exception as e:
        return _err(f"Error selecting samples: {str(e)}")
||
# ==================================================================
# LIBRARY ANALYSIS TOOLS (Sprint 1 Integration)
# ==================================================================
# Absolute path to the local reggaeton sample library (Windows install).
REGGAETON_LIB = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton"

# Cache for expensive engine instances
_analyzer_cache = None    # LibreriaAnalyzer singleton (see _get_analyzer)
_embedding_cache = None   # EmbeddingEngine singleton (see _get_embedding_engine)
_matcher_cache = None     # ReferenceMatcher singleton (see _get_matcher)
||
def _get_analyzer():
    """Lazy-load the LibreriaAnalyzer with caching."""
    global _analyzer_cache
    if _analyzer_cache is not None:
        return _analyzer_cache
    logger.info("Initializing LibreriaAnalyzer cache")
    from engines.libreria_analyzer import LibreriaAnalyzer
    _analyzer_cache = LibreriaAnalyzer(REGGAETON_LIB, verbose=False)
    logger.info("LibreriaAnalyzer cache ready")
    return _analyzer_cache
||
def _get_embedding_engine():
    """Lazy-load the EmbeddingEngine with caching."""
    global _embedding_cache
    if _embedding_cache is not None:
        return _embedding_cache
    from engines.embedding_engine import EmbeddingEngine
    _embedding_cache = EmbeddingEngine()
    return _embedding_cache
||
def _get_matcher():
    """Lazy-load the ReferenceMatcher with caching.

    The matcher is seeded with the user's reference track when the file
    exists; otherwise it is constructed without a reference.
    """
    global _matcher_cache
    if _matcher_cache is None:
        from engines.reference_matcher import ReferenceMatcher
        # Fix: use os.path.join instead of a hard-coded "\\" concatenation
        # so the path is built portably.
        ref_path = os.path.join(REGGAETON_LIB, "reggaeton_ejemplo.mp3")
        _matcher_cache = ReferenceMatcher(reference_path=ref_path if os.path.isfile(ref_path) else None)
    return _matcher_cache
||
@mcp.tool()
def analyze_library(ctx: Context, force_reanalyze: bool = False) -> str:
    """Analyze all samples in the reggaeton library. Extracts BPM, Key, MFCCs, etc."""
    try:
        analyzer = _get_analyzer()
        analyzed = analyzer.analyze_all(force_reanalyze=force_reanalyze)
        summary = {
            "total_analyzed": len(analyzed),
            "cache_file": str(analyzer._cache_file),
        }
        return _ok(summary)
    except Exception as e:
        return _err(f"Error analyzing library: {str(e)}")
||
@mcp.tool()
def get_library_stats(ctx: Context) -> str:
    """Get statistics about the analyzed library.

    Fast path: serve stats from the analyzer's on-disk cache. If no analysis
    exists yet, fall back to a raw audio-file count grouped by role folder so
    the tool still returns something useful without a full (slow) analysis.
    """
    try:
        logger.info("get_library_stats: start")
        analyzer = _get_analyzer()
        # Try to load cache from disk first (fast)
        if not analyzer.features:
            analyzer._load_cache()
        # If still no features, return basic file count without full analysis
        if not analyzer.features:
            import glob as _glob
            # Fix: one loop over extensions instead of four copy-pasted
            # glob calls.
            audio_files = []
            for ext in ("wav", "mp3", "aif", "flac"):
                audio_files += _glob.glob(os.path.join(REGGAETON_LIB, "**", "*." + ext), recursive=True)
            # Count by folder (role); after stripping the library prefix the
            # path starts with a separator, so parts[1] is the role folder.
            roles = {}
            for f in audio_files:
                parts = f.replace(REGGAETON_LIB, "").split(os.sep)
                role = parts[1] if len(parts) > 1 else "unknown"
                roles[role] = roles.get(role, 0) + 1
            return _ok({
                "total_files_found": len(audio_files),
                "files_by_role": roles,
                "note": "Full spectral analysis not yet performed. Call analyze_library first.",
            })
        stats = analyzer.get_stats()
        logger.info("get_library_stats: done")
        return _ok(stats)
    except Exception as e:
        logger.exception("get_library_stats: failed")
        return _err(f"Error getting library stats: {str(e)}")
||
@mcp.tool()
def get_similar_samples(ctx: Context, sample_path: str, top_n: int = 10) -> str:
    """Find samples similar to a given sample using embeddings."""
    try:
        matches = _get_embedding_engine().find_similar(sample_path, top_n=top_n)
        return _ok({"reference": sample_path, "similar": matches})
    except Exception as e:
        return _err(f"Error finding similar samples: {str(e)}")
||
@mcp.tool()
def find_samples_like_audio(ctx: Context, audio_path: str, top_n: int = 20, role: str = "") -> str:
    """Find samples similar to an external audio file (e.g., reggaeton_ejemplo.mp3)."""
    try:
        engine = _get_embedding_engine()
        matches = engine.find_by_reference(audio_path, top_n=top_n)
        if role:
            # Keep only hits for the requested role, re-capped at top_n.
            matches = [m for m in matches if m.get("role", "") == role][:top_n]
        return _ok({"reference": audio_path, "similar": matches})
    except Exception as e:
        return _err(f"Error finding samples like audio: {str(e)}")
||
@mcp.tool()
def get_user_sound_profile(ctx: Context) -> str:
    """Get the user's sound profile based on reggaeton_ejemplo.mp3."""
    try:
        profile = _get_matcher().get_user_profile()
        return _ok(profile)
    except Exception as e:
        return _err(f"Error getting user profile: {str(e)}")
||
@mcp.tool()
def get_recommended_samples(ctx: Context, role: str = "", count: int = 5) -> str:
    """Get recommended samples for a role based on user's sound profile."""
    try:
        from engines.reference_matcher import get_recommended_samples as _rec
        picks = _rec(role if role else None, count)
        return _ok({"role": role or "all", "samples": picks})
    except Exception as e:
        return _err(f"Error getting recommended samples: {str(e)}")
||
@mcp.tool()
def compare_two_samples(ctx: Context, path1: str, path2: str) -> str:
    """Compare two samples and return similarity score and feature differences.

    Args:
        path1: Path of the first sample (must be in the embeddings index).
        path2: Path of the second sample (must be in the embeddings index).

    Returns:
        JSON with the cosine similarity and each sample's features.
    """
    try:
        engine = _get_embedding_engine()
        vec_a = engine.get_embedding(path1)
        vec_b = engine.get_embedding(path2)
        if vec_a is None or vec_b is None:
            return _err("One or both samples not found in embeddings index")

        from engines.embedding_engine import cosine_similarity
        score = cosine_similarity(vec_a, vec_b)

        # Per-sample features are best-effort: only available when the
        # engine exposes an analyzer.
        has_analyzer = hasattr(engine, 'analyzer')
        feats_a = engine.analyzer.get_features(path1) if has_analyzer else {}
        feats_b = engine.analyzer.get_features(path2) if has_analyzer else {}
        return _ok({
            "similarity": float(score),
            "sample1": {"path": path1, "features": feats_a},
            "sample2": {"path": path2, "features": feats_b},
        })
    except Exception as exc:
        return _err(f"Error comparing samples: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def browse_library(ctx: Context, pack: str = "", role: str = "", bpm_min: float = 0, bpm_max: float = 0, key: str = "") -> str:
    """Browse the library with filters for pack, role, BPM range, and key.

    Args:
        pack: Case-insensitive substring match on the pack name.
        role: Case-insensitive exact match on the sample role.
        bpm_min: Minimum BPM (0 disables the lower bound).
        bpm_max: Maximum BPM (0 disables the upper bound).
        key: Case-insensitive substring match on the musical key.

    Returns:
        JSON with the total match count and up to 50 matching samples.
    """
    try:
        analyzer = _get_analyzer()
        if not analyzer.features:
            # Lazily build the feature index on first use.
            analyzer.analyze_all()

        def _keep(feats: dict) -> bool:
            if pack and pack.lower() not in feats.get("pack", "").lower():
                return False
            if role and role.lower() != feats.get("role", "").lower():
                return False
            if key and key.lower() not in feats.get("key", "").lower():
                return False
            tempo = feats.get("bpm", 0)
            if bpm_min > 0 and tempo < bpm_min:
                return False
            if bpm_max > 0 and tempo > bpm_max:
                return False
            return True

        hits = [
            {"path": path, **feats}
            for path, feats in analyzer.features.items()
            if _keep(feats)
        ]
        # Report the full count but cap the returned payload at 50 entries.
        return _ok({"total": len(hits), "samples": hits[:50]})
    except Exception as exc:
        return _err(f"Error browsing library: {str(exc)}")
|
||
|
||
|
||
# ==================================================================
|
||
# ADVANCED PRODUCTION TOOLS (Sprint 2 - Phase 1 & 2)
|
||
# ==================================================================
|
||
|
||
@mcp.tool()
def generate_complete_reggaeton(ctx: Context, bpm: float = 95, key: str = "Am",
                                style: str = "classic", structure: str = "verse-chorus",
                                use_samples: bool = True) -> str:
    """Generate a complete reggaeton project with all elements.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Reggaeton style (classic, dembow, perreo, moombahton)
        structure: Song structure (verse-chorus, full, intro-drop)
        use_samples: Whether to use samples from the library

    Returns:
        JSON with project summary including tracks created, samples used, and arrangement.
    """
    try:
        from engines.production_workflow import ProductionWorkflow

        outcome = ProductionWorkflow().generate_complete_reggaeton(
            bpm=bpm,
            key=key,
            style=style,
            structure=structure,
            use_samples=use_samples,
        )
        summary = {
            "project_type": "complete_reggaeton",
            "bpm": bpm,
            "key": key,
            "style": style,
            "structure": structure,
            "tracks_created": outcome.get("tracks", []),
            "samples_used": outcome.get("samples", {}),
            "arrangement": outcome.get("arrangement", {}),
            "duration_bars": outcome.get("duration_bars", 64),
        }
        return _ok(summary)
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as exc:
        return _err(f"Error generating complete reggaeton: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def generate_from_reference(ctx: Context, reference_audio_path: str) -> str:
    """Generate a track using a reference audio file for style matching.

    Analyzes the reference audio using the reference_matcher engine,
    finds similar samples from the library, and generates a track
    with matching sonic characteristics.

    Args:
        reference_audio_path: Path to the reference audio file (.mp3, .wav)

    Returns:
        JSON with generated tracks info, matched samples, and similarity scores.
    """
    try:
        from engines.production_workflow import ProductionWorkflow

        if not os.path.isfile(reference_audio_path):
            return _err(f"Reference audio not found: {reference_audio_path}")

        outcome = ProductionWorkflow().generate_from_reference(reference_audio_path)
        # Non-dict results are wrapped so the payload shape stays predictable.
        payload = outcome if isinstance(outcome, dict) else {"result": outcome}
        return _ok({"reference": reference_audio_path, **payload})
    except ImportError as exc:
        return _err(f"Required engine not available: {str(exc)}")
    except Exception as exc:
        return _err(f"Error generating from reference: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def load_sample_to_clip(ctx: Context, track_index: int, clip_index: int, sample_path: str) -> str:
    """Load an audio sample into a Session View clip slot.

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot
        sample_path: Absolute path to the audio file (.wav, .mp3)

    Returns:
        JSON with status of the load operation.
    """
    if not os.path.isfile(sample_path):
        return _err(f"Sample not found: {sample_path}")

    resp = _send_to_ableton(
        "load_sample_to_clip",
        {"track_index": track_index, "clip_index": clip_index, "sample_path": sample_path},
        timeout=TIMEOUTS["load_sample_to_clip"]
    )
    if resp.get("status") == "success":
        return _ok(resp)
    # Fallback text so a missing "message" key never yields a None error.
    return _err(resp.get("message", "Failed to load sample to clip"))
|
||
|
||
|
||
@mcp.tool()
def load_sample_to_drum_rack(ctx: Context, track_index: int, sample_path: str,
                             pad_note: int = 36) -> str:
    """Load a sample into a specific pad (note) of a Drum Rack.

    Args:
        track_index: Index of the track containing the Drum Rack
        sample_path: Absolute path to the audio file
        pad_note: MIDI note number for the pad (default 36 = C1)

    Returns:
        JSON with status of the load operation.
    """
    if not os.path.isfile(sample_path):
        return _err(f"Sample not found: {sample_path}")

    resp = _send_to_ableton(
        "load_sample_to_drum_rack_pad",
        {"track_index": track_index, "pad_note": pad_note, "sample_path": sample_path},
        timeout=TIMEOUTS["load_sample_to_drum_rack"]
    )
    if resp.get("status") == "success":
        return _ok(resp)
    # Fallback text so a missing "message" key never yields a None error.
    return _err(resp.get("message", "Failed to load sample to drum rack pad"))
|
||
|
||
|
||
@mcp.tool()
def set_warp_markers(ctx: Context, track_index: int, clip_index: int, markers: list) -> str:
    """Configure warp markers for an audio clip.

    Sets custom warp markers to adjust timing and groove of audio clips.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip
        markers: List of warp marker positions in bars [{"position": 0.0, "warp_to": 0.0}, ...]

    Returns:
        JSON with status and number of markers set.
    """
    resp = _send_to_ableton(
        "set_warp_markers",
        {"track_index": track_index, "clip_index": clip_index, "markers": markers},
        timeout=TIMEOUTS["set_warp_markers"]
    )
    if resp.get("status") == "success":
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "markers_set": len(markers),
            "markers": markers,
        })
    # Fallback text so a missing "message" key never yields a None error.
    return _err(resp.get("message", "Failed to set warp markers"))
|
||
|
||
|
||
@mcp.tool()
def reverse_clip(ctx: Context, track_index: int, clip_index: int) -> str:
    """Reverse an audio or MIDI clip.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip to reverse

    Returns:
        JSON with status of the reverse operation.
    """
    payload = {"track_index": track_index, "clip_index": clip_index}
    return _proxy_ableton_command(
        "reverse_clip",
        payload,
        timeout=TIMEOUTS["reverse_clip"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
@mcp.tool()
def pitch_shift_clip(ctx: Context, track_index: int, clip_index: int, semitones: float) -> str:
    """Pitch shift a clip without affecting tempo (using Complex Pro).

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip
        semitones: Number of semitones to shift (positive or negative)

    Returns:
        JSON with new pitch value and status.
    """
    # Clamp check: Ableton's transpose range is two octaves either way.
    if semitones < -24.0 or semitones > 24.0:
        return _err(f"Invalid pitch shift: {semitones}. Must be -24 to +24 semitones.")

    return _proxy_ableton_command(
        "pitch_shift_clip",
        {"track_index": track_index, "clip_index": clip_index, "semitones": semitones},
        timeout=TIMEOUTS["pitch_shift_clip"],
        defaults={"track_index": track_index, "clip_index": clip_index, "pitch_shift_semitones": semitones},
    )
|
||
|
||
|
||
@mcp.tool()
def time_stretch_clip(ctx: Context, track_index: int, clip_index: int, factor: float) -> str:
    """Time stretch a clip without affecting pitch.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip
        factor: Stretch factor (1.0 = normal, 2.0 = half speed/double length, 0.5 = double speed)

    Returns:
        JSON with new duration and status.
    """
    if factor < 0.25 or factor > 4.0:
        return _err(f"Invalid stretch factor: {factor}. Must be 0.25x to 4.0x.")

    return _proxy_ableton_command(
        "time_stretch_clip",
        {"track_index": track_index, "clip_index": clip_index, "factor": factor},
        timeout=TIMEOUTS["time_stretch_clip"],
        defaults={"track_index": track_index, "clip_index": clip_index, "stretch_factor": factor},
    )
|
||
|
||
|
||
@mcp.tool()
def slice_clip(ctx: Context, track_index: int, clip_index: int, num_slices: int = 8) -> str:
    """Slice an audio clip into multiple segments.

    Divides a clip into equal slices, useful for creating drum racks
    or rearranging audio segments.

    Args:
        track_index: Index of the track containing the clip
        clip_index: Index of the clip to slice
        num_slices: Number of slices to create (default 8, max 64)

    Returns:
        JSON with number of slices created and their positions.
    """
    if num_slices < 2 or num_slices > 64:
        return _err(f"Invalid number of slices: {num_slices}. Must be 2-64.")

    payload = {"track_index": track_index, "clip_index": clip_index, "num_slices": num_slices}
    return _proxy_ableton_command(
        "slice_clip",
        payload,
        timeout=TIMEOUTS["slice_clip"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
# ==================================================================
|
||
# FASE 3: MIXING & EFFECTS
|
||
# ==================================================================
|
||
|
||
@mcp.tool()
def create_bus_track(ctx: Context, bus_type: str = "Group") -> str:
    """Create a group track (bus) for mixing.

    Args:
        bus_type: Kind of bus track to create (default "Group").

    Returns:
        JSON with the result reported by the Remote Script.
    """
    payload = {"bus_type": bus_type}
    return _proxy_ableton_command(
        "create_bus_track",
        payload,
        timeout=TIMEOUTS["create_bus_track"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
@mcp.tool()
def route_track_to_bus(ctx: Context, track_index: int, bus_name: str) -> str:
    """Route a track to a bus/group track.

    Args:
        track_index: Index of the track to route.
        bus_name: Name of the destination bus/group track.

    Returns:
        JSON with the result reported by the Remote Script.
    """
    payload = {"track_index": track_index, "bus_name": bus_name}
    return _proxy_ableton_command(
        "route_track_to_bus",
        payload,
        timeout=TIMEOUTS["route_track_to_bus"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
@mcp.tool()
def create_return_track(ctx: Context, effect_type: str = "Reverb") -> str:
    """Create a return track with an effect.

    Args:
        effect_type: Name of a ReturnEffect member (spaces allowed, case-insensitive).

    Returns:
        JSON with the return track index, name, and effect parameters.
    """
    try:
        from engines.mixing_engine import ReturnEffect, get_mixing_engine

        # Normalize "Ping Pong" -> "PING_PONG" to match the enum naming.
        member_name = effect_type.strip().upper().replace(" ", "_")
        if member_name not in ReturnEffect.__members__:
            available = ', '.join(ReturnEffect.__members__.keys())
            return _err(
                f"Unknown return effect '{effect_type}'. Available: {available}"
            )

        created = get_mixing_engine().return_manager.create_return_track(
            ReturnEffect[member_name]
        )
        return _ok({
            "effect_type": effect_type,
            "return_index": int(created.track_index),
            "track_name": created.name,
            "parameters": created.effect_parameters,
        })
    except Exception as exc:
        return _err(f"Error creating return track: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def set_track_send(ctx: Context, track_index: int, return_index: int, amount: float) -> str:
    """Configure send amount from a track to a return track.

    Args:
        track_index: Index of the source track.
        return_index: Index of the destination return track.
        amount: Send level, 0.0-1.0.

    Returns:
        JSON confirming the send configuration.
    """
    if amount < 0.0 or amount > 1.0:
        return _err(f"Invalid send amount: {amount}. Must be 0.0-1.0.")
    try:
        from engines.mixing_engine import get_mixing_engine

        manager = get_mixing_engine().return_manager
        if not manager.set_track_send(track_index, return_index, amount):
            return _err("Failed to set send")
        return _ok({"track_index": track_index, "return_index": return_index, "amount": amount})
    except Exception as exc:
        return _err(f"Error setting track send: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def insert_device(ctx: Context, track_index: int, device_name: str) -> str:
    """Insert a device/plugin on a track.

    Args:
        track_index: Index of the target track.
        device_name: Name of the device/plugin to insert.

    Returns:
        JSON with the inserted device's index on success.
    """
    resp = _send_to_ableton(
        "insert_device",
        {"track_index": track_index, "device_name": device_name},
        timeout=TIMEOUTS["insert_device"],
    )
    if resp.get("status") != "success":
        return _err(resp.get("message", "Failed to insert device"))
    return _ok({"track_index": track_index, "device": device_name, "device_index": resp.get("device_index")})
|
||
|
||
|
||
@mcp.tool()
def configure_eq(ctx: Context, track_index: int, preset: str = "default") -> str:
    """Configure EQ Eight on a track with a preset.

    Args:
        track_index: Index of the target track.
        preset: EQ preset name (default "default").

    Returns:
        JSON with the result reported by the Remote Script.
    """
    payload = {"track_index": track_index, "preset": preset}
    return _proxy_ableton_command(
        "configure_eq",
        payload,
        timeout=TIMEOUTS["configure_eq"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
@mcp.tool()
def configure_compressor(ctx: Context, track_index: int, preset: str = "default",
                         threshold: float = -20.0, ratio: float = 4.0) -> str:
    """Configure Compressor on a track.

    Args:
        track_index: Index of the target track.
        preset: Compressor preset name; "default" means no named preset.
        threshold: Threshold in dB (default -20.0).
        ratio: Compression ratio (default 4.0).

    Returns:
        JSON with the applied settings on success.
    """
    try:
        from engines.mixing_engine import get_compression_settings

        # "default" is a sentinel meaning "do not apply a named preset".
        chosen_preset = None if preset == "default" else preset
        outcome = get_compression_settings().configure_compressor(
            track_index,
            threshold=threshold,
            ratio=ratio,
            preset=chosen_preset,
        )
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to configure compressor"))
        return _ok({
            "track_index": track_index,
            "preset": preset,
            "threshold": threshold,
            "ratio": ratio,
            "settings": outcome.get("settings", {})
        })
    except Exception as exc:
        return _err(f"Error configuring compressor: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def setup_sidechain(ctx: Context, source_track: int, target_track: int, amount: float = 0.5) -> str:
    """Setup sidechain compression from source track to target track.

    Args:
        source_track: Index of the track that triggers the compression.
        target_track: Index of the track to be compressed.
        amount: Sidechain depth, 0.0-1.0 (default 0.5).

    Returns:
        JSON with the result reported by the Remote Script.
    """
    if amount < 0.0 or amount > 1.0:
        return _err(f"Invalid sidechain amount: {amount}. Must be 0.0-1.0.")
    payload = {"source_track": source_track, "target_track": target_track, "amount": amount}
    return _proxy_ableton_command(
        "setup_sidechain",
        payload,
        timeout=TIMEOUTS["setup_sidechain"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
@mcp.tool()
def auto_gain_staging(ctx: Context) -> str:
    """Automatically adjust gain staging for all tracks.

    Reads the live track list from Ableton, then delegates level
    computation to the mixing engine's gain-staging helper.

    Returns:
        JSON with the adjusted track count, applied levels, and headroom flag.
    """
    try:
        from engines.mixing_engine import get_gain_staging

        tracks_resp = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
        if tracks_resp.get("status") != "success":
            return _err(tracks_resp.get("message", "Failed to read tracks from Ableton"))

        live_tracks = _ableton_result(tracks_resp).get("tracks", [])
        # The engine keys on index/name; the track name doubles as its role hint.
        track_config = [
            {"track_index": t.get("index", 0), "name": t.get("name", ""), "role": t.get("name", "")}
            for t in live_tracks
        ]

        outcome = get_gain_staging().auto_gain_staging(track_config)
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to adjust gain staging"))
        return _ok({
            "tracks_adjusted": outcome.get("total_tracks", 0),
            "adjustments": outcome.get("applied_levels", []),
            "headroom_ok": outcome.get("headroom_ok", False),
        })
    except Exception as exc:
        return _err(f"Error in auto gain staging: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def apply_master_chain(ctx: Context, preset: str = "standard") -> str:
    """Apply a mastering chain to the master track.

    Args:
        preset: Chain preset name; "standard" maps to "reggaeton_streaming".

    Returns:
        JSON with the devices added to the master track.
    """
    try:
        from engines.mixing_engine import get_master_chain

        # "standard" is an alias for the project's streaming-oriented preset.
        chain_preset = "reggaeton_streaming" if preset == "standard" else preset
        outcome = get_master_chain().apply_master_chain(chain_preset)
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to apply master chain"))
        return _ok({
            "preset": chain_preset,
            "devices_added": outcome.get("chain_applied", []),
            "master_track": "Master"
        })
    except Exception as exc:
        return _err(f"Error applying master chain: {str(exc)}")
|
||
|
||
|
||
# ==================================================================
|
||
# FASE 4: WORKFLOW & EXPORT
|
||
# ==================================================================
|
||
|
||
@mcp.tool()
def export_project(ctx: Context, path: str, format: str = "wav") -> str:
    """Export the project to audio file.

    Args:
        path: Destination path for the rendered audio.
        format: Output format (default "wav").

    Returns:
        JSON with export path, format, duration, and file size on success.
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        outcome = WorkflowEngine().export_project(path, format)
        if not outcome.get("success"):
            return _err(outcome.get("message", "Failed to export project"))
        return _ok({
            "export_path": path,
            "format": format,
            "duration": outcome.get("duration"),
            "file_size": outcome.get("file_size")
        })
    except Exception as exc:
        return _err(f"Error exporting project: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def get_project_summary(ctx: Context) -> str:
    """Get a summary of the current project from Ableton Live.

    Combines session info and the track list into a single overview:
    track counts by type, device names in use, tempo, and play state.
    """
    try:
        session_resp = _send_to_ableton("get_session_info", timeout=5.0)
        if session_resp.get("status") != "success":
            return _err(f"Cannot get session info: {session_resp.get('message')}")
        session = session_resp.get("result", {})

        tracks_resp = _send_to_ableton("get_tracks", timeout=5.0)
        tracks = []
        if tracks_resp.get("status") == "success":
            tracks = tracks_resp.get("result", {}).get("tracks", [])

        midi_total = sum(1 for t in tracks if t.get("is_midi"))
        audio_total = sum(1 for t in tracks if t.get("is_audio"))
        # Deduplicated device names across all tracks, capped at 20 entries.
        device_names = list(set(d for t in tracks for d in t.get("devices", [])))

        return _ok({
            "track_count": session.get("num_tracks", len(tracks)),
            "midi_tracks": midi_total,
            "audio_tracks": audio_total,
            "return_tracks": session.get("num_return_tracks", 0),
            "clips": sum(t.get("clip_slots", 0) for t in tracks),
            "scenes": session.get("num_scenes", 0),
            "devices_used": device_names[:20],
            "duration_minutes": 0,
            "project_name": "Live Project",
            "tempo": session.get("tempo", 0),
            "is_playing": session.get("is_playing", False),
        })
    except Exception as exc:
        return _err(f"Error getting project summary: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def suggest_improvements(ctx: Context) -> str:
    """Get AI suggestions for improving the project.

    Returns:
        JSON with suggestions, priority, categories, and estimated impact.
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        outcome = WorkflowEngine().suggest_improvements()
        return _ok({
            "suggestions": outcome.get("suggestions", []),
            "priority": outcome.get("priority", "medium"),
            "categories": outcome.get("categories", {}),
            "estimated_impact": outcome.get("estimated_impact", "medium")
        })
    except Exception as exc:
        return _err(f"Error generating suggestions: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def validate_project(ctx: Context) -> str:
    """Validate project consistency and best practices using live Ableton data.

    Pulls the current track list and session info from Ableton, runs a set of
    sanity checks, and returns issues, warnings, passed checks, and a score
    (100 minus 25 per issue and 10 per warning, floored at 0).
    """
    try:
        tracks_resp = _send_to_ableton("get_tracks", timeout=5.0)
        tracks = tracks_resp.get("result", {}).get("tracks", []) if tracks_resp.get("status") == "success" else []
        session_resp = _send_to_ableton("get_session_info", timeout=5.0)
        session = session_resp.get("result", {}) if session_resp.get("status") == "success" else {}

        issues = []
        warnings = []
        passed = []

        track_count = len(tracks)
        if track_count == 0:
            issues.append("No tracks in project")
        else:
            passed.append(f"{track_count} tracks found")

        # Defined unconditionally so the summary fields below are always
        # available, even for a project with zero tracks.
        midi_tracks = [t for t in tracks if t.get("is_midi")]
        audio_tracks = [t for t in tracks if t.get("is_audio")]
        if not midi_tracks and not audio_tracks:
            warnings.append("All tracks appear to be return or master tracks")

        if session.get("tempo", 0) < 60 or session.get("tempo", 0) > 200:
            warnings.append(f"Unusual tempo: {session.get('tempo')} BPM")
        else:
            passed.append(f"Tempo OK: {session.get('tempo')} BPM")

        # Use .get("name", "") rather than ["name"]: a malformed track dict
        # should not abort the entire validation with a KeyError.
        muted = [t.get("name", "") for t in tracks if t.get("mute")]
        if muted:
            warnings.append(f"Muted tracks: {', '.join(muted)}")
        empty = [t.get("name", "") for t in tracks if t.get("clip_slots", 0) == 0]
        if empty:
            warnings.append(f"Tracks with no clip slots: {', '.join(empty)}")

        score = max(0, 100 - len(issues) * 25 - len(warnings) * 10)
        return _ok({
            "is_valid": len(issues) == 0,
            "issues": issues,
            "warnings": warnings,
            "passed_checks": passed,
            "score": score,
            "track_count": track_count,
            "midi_count": len(midi_tracks),
            "audio_count": len(audio_tracks),
        })
    except Exception as e:
        return _err(f"Error validating project: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def humanize_track(ctx: Context, track_index: int, intensity: float = 0.5) -> str:
    """Apply humanization to a MIDI track (velocity and timing variations).

    Args:
        track_index: Index of the MIDI track to humanize.
        intensity: Strength of the variations, 0.0-1.0 (default 0.5).

    Returns:
        JSON with the result reported by the Remote Script.
    """
    if intensity < 0.0 or intensity > 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    payload = {"track_index": track_index, "intensity": intensity}
    return _proxy_ableton_command(
        "humanize_track",
        payload,
        timeout=TIMEOUTS["humanize_track"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
# ==================================================================
|
||
# FASE 5: PHASE 1 - BRIDGE ENGINES → ABLETON (T001-T015 + T081-T085)
|
||
# ==================================================================
|
||
|
||
# ------------------------------------------------------------------
|
||
# Production Pipeline Tools (T081-T085)
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def produce_reggaeton(ctx: Context, bpm: float = 95, key: str = "Am",
                      style: str = "classic", structure: str = "verse-chorus",
                      record_arrangement: bool = True) -> str:
    """Generate a complete reggaeton production pipeline (T081) - Session View based.

    DEPRECATED: Consider using build_arrangement_timeline() for direct Arrangement View creation.

    This tool creates content in Session View clips first. For direct timeline-based
    composition without the Session View intermediate step, use build_arrangement_timeline().

    MIGRATION GUIDE:
    - OLD: produce_reggaeton() → Session View clips → manual arrangement
    - NEW: build_arrangement_timeline() → Direct Arrangement View placement

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Reggaeton style (classic, dembow, perreo, moombahton)
        structure: Song structure (verse-chorus, full, intro-drop)
        record_arrangement: Record to Arrangement View automatically (default True)

    Returns:
        JSON with complete production summary.
    """
    try:
        logger.info("produce_reggaeton: start bpm=%s key=%s style=%s structure=%s", bpm, key, style, structure)
        from engines.production_workflow import ProductionWorkflow

        outcome = ProductionWorkflow().produce_reggaeton(
            bpm=bpm, key=key, style=style, structure=structure,
            record_arrangement=record_arrangement
        )
        logger.info("produce_reggaeton: workflow returned")
        return _ok({
            "production_type": "reggaeton",
            "bpm": bpm,
            "key": key,
            "style": style,
            "structure": structure,
            "record_arrangement": record_arrangement,
            "tracks_created": outcome.get("tracks", []),
            "clips_generated": outcome.get("clips", []),
            "duration_bars": outcome.get("duration_bars", 64),
        })
    except ImportError:
        logger.exception("produce_reggaeton: import error")
        return _err("Production workflow engine not available.")
    except Exception as exc:
        logger.exception("produce_reggaeton: failed")
        return _err(f"Error producing reggaeton: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def produce_from_reference(ctx: Context, audio_path: str) -> str:
    """Generate production from a reference audio file (T082).

    Analyzes the reference audio and generates a matching production.

    Args:
        audio_path: Path to the reference audio file (.mp3, .wav)

    Returns:
        JSON with production details and similarity analysis.
    """
    if not os.path.isfile(audio_path):
        return _err(f"Reference audio not found: {audio_path}")
    try:
        from engines.production_workflow import ProductionWorkflow

        outcome = ProductionWorkflow().produce_from_reference(reference_path=audio_path)
        # Non-dict results are wrapped so the payload shape stays predictable.
        payload = outcome if isinstance(outcome, dict) else {"result": outcome}
        return _ok({
            "reference": audio_path,
            "production_type": "from_reference",
            **payload,
        })
    except ImportError:
        return _err("Production workflow or reference matcher engine not available.")
    except Exception as exc:
        return _err(f"Error producing from reference: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def produce_arrangement(ctx: Context, bpm: float = 95, key: str = "Am",
                        style: str = "classic") -> str:
    """Generate production directly in Arrangement View (T083).

    Creates a complete song structure in Arrangement View.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Production style (classic, modern, perreo, moombahton)

    Returns:
        JSON with arrangement details and clip positions.
    """
    try:
        from engines.production_workflow import ProductionWorkflow

        outcome = ProductionWorkflow().produce_arrangement(bpm=bpm, key=key, style=style)
        return _ok({
            "production_type": "arrangement",
            "view": "Arrangement",
            "bpm": bpm,
            "key": key,
            "style": style,
            "tracks_created": outcome.get("tracks", []),
            "clips_arranged": outcome.get("clips", []),
            "total_bars": outcome.get("total_bars", 128),
        })
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as exc:
        return _err(f"Error producing arrangement: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def complete_production(ctx: Context, bpm: float = 95, key: str = "Am",
                        style: str = "classic", output_dir: str = "") -> str:
    """Complete production pipeline with render (T084).

    Generates a full production and renders it to audio.

    Args:
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Production style
        output_dir: Directory for rendered output (optional)

    Returns:
        JSON with production summary and render path.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        from engines.workflow_engine import WorkflowEngine

        outcome = ProductionWorkflow().complete_production(bpm=bpm, key=key, style=style)

        # Rendering is optional: only attempted when a valid directory is given.
        render_path = ""
        if output_dir and os.path.isdir(output_dir):
            target = os.path.join(output_dir, f"production_{int(time.time())}.wav")
            render_result = WorkflowEngine().export_project(path=target, format="wav")
            render_path = render_result.get("export_path", "")

        return _ok({
            "production_type": "complete",
            "bpm": bpm,
            "key": key,
            "style": style,
            "tracks_created": outcome.get("tracks", []),
            "clips_generated": outcome.get("clips", []),
            "render_path": render_path,
        })
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as exc:
        return _err(f"Error in complete production: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def batch_produce(ctx: Context, count: int = 3, style: str = "classic",
                  bpm_range: str = "90-100") -> str:
    """Batch produce multiple songs (T085).

    Generates multiple productions with varying parameters.

    Args:
        count: Number of songs to produce (default 3, max 10)
        style: Production style
        bpm_range: BPM range as "min-max" string

    Returns:
        JSON with batch production summary.
    """
    if count < 1 or count > 10:
        return _err(f"Invalid count: {count}. Must be 1-10.")
    try:
        from engines.production_workflow import ProductionWorkflow

        workflow = ProductionWorkflow()

        # One BPM per song: random within "min-max", or a fixed value otherwise.
        if "-" in bpm_range:
            import random
            parts = bpm_range.split("-")
            lo, hi = int(parts[0]), int(parts[1])
            bpms = [random.randint(lo, hi) for _ in range(count)]
        else:
            bpms = [int(bpm_range)] * count

        keys = ["Am", "Dm", "Em", "Gm", "Cm"]
        productions = []
        for i, tempo in enumerate(bpms):
            song_key = keys[i % len(keys)]
            outcome = workflow.produce_reggaeton(
                bpm=tempo,
                key=song_key,
                style=style,
                structure="verse-chorus"
            )
            productions.append({
                "index": i + 1,
                "bpm": tempo,
                "key": song_key,
                "tracks": len(outcome.get("tracks", [])),
            })

        return _ok({
            "batch_size": count,
            "style": style,
            "bpm_range": bpm_range,
            "productions": productions,
        })
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as exc:
        return _err(f"Error in batch production: {str(exc)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# MIDI Clip Generator Tools (T001-T005)
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def generate_midi_clip(ctx: Context, track_index: int, clip_index: int = 0,
                       notes: list = None) -> str:
    """Create a MIDI clip with specified notes (T001).

    Args:
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        notes: List of note dicts with pitch, start_time, duration, velocity

    Returns:
        JSON with clip creation status; an error payload if either the clip
        creation or the note insertion fails.
    """
    if notes is None:
        notes = []
    try:
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": 4.0},
            timeout=TIMEOUTS["generate_midi_clip"]
        )
        # Bug fix: a failed create_clip previously fell through to a success
        # response with "created_empty": True. Surface the failure instead.
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create clip"))
        if notes:
            resp2 = _send_to_ableton(
                "add_notes_to_clip",
                {"track_index": track_index, "clip_index": clip_index, "notes": notes},
                timeout=TIMEOUTS["generate_midi_clip"]
            )
            if resp2.get("status") == "success":
                return _ok({
                    "track_index": track_index,
                    "clip_index": clip_index,
                    "notes_added": len(notes),
                })
            return _err(resp2.get("message", "Failed to add notes"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "notes_added": 0,
            "created_empty": True,
        })
    except Exception as e:
        return _err(f"Error generating MIDI clip: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def generate_dembow_clip(ctx: Context, track_index: int, clip_index: int = 0,
                         bars: int = 4, variation: str = "standard") -> str:
    """Generate a dembow rhythm MIDI clip (T002).

    Creates a classic reggaeton dembow pattern.  Each pattern describes one
    2-beat bar (kick=36, snare=38, closed hat=42) and is tiled across `bars`.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        variation: Pattern variation (standard, minimal, complex, fill)

    Returns:
        JSON with clip generation status.
    """
    try:
        patterns = {
            "standard": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 0.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 0.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 0.75, "duration": 0.25, "velocity": 80},
                {"pitch": 36, "start_time": 1.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 1.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 1.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 1.75, "duration": 0.25, "velocity": 80},
            ],
            "minimal": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.5, "velocity": 100},
                {"pitch": 42, "start_time": 0.5, "duration": 0.5, "velocity": 80},
                {"pitch": 36, "start_time": 1.0, "duration": 0.5, "velocity": 100},
                {"pitch": 42, "start_time": 1.5, "duration": 0.5, "velocity": 80},
            ],
            "complex": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 0.125, "duration": 0.125, "velocity": 70},
                {"pitch": 42, "start_time": 0.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 0.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 0.625, "duration": 0.125, "velocity": 70},
                {"pitch": 42, "start_time": 0.75, "duration": 0.25, "velocity": 80},
                {"pitch": 36, "start_time": 1.0, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 1.125, "duration": 0.125, "velocity": 70},
                {"pitch": 42, "start_time": 1.25, "duration": 0.25, "velocity": 80},
                {"pitch": 38, "start_time": 1.5, "duration": 0.25, "velocity": 90},
                {"pitch": 42, "start_time": 1.75, "duration": 0.25, "velocity": 80},
            ],
            "fill": [
                {"pitch": 36, "start_time": 0.0, "duration": 0.25, "velocity": 100},
                {"pitch": 38, "start_time": 0.25, "duration": 0.25, "velocity": 100},
                {"pitch": 42, "start_time": 0.5, "duration": 0.25, "velocity": 100},
                {"pitch": 38, "start_time": 0.75, "duration": 0.25, "velocity": 100},
            ],
        }
        notes = patterns.get(variation, patterns["standard"])
        # Tile the one-bar pattern across all bars (2.0 beats per bar here).
        full_notes = []
        for bar in range(bars):
            for note in notes:
                full_notes.append({
                    "pitch": note["pitch"],
                    "start_time": note["start_time"] + (bar * 2.0),
                    "duration": note["duration"],
                    "velocity": note["velocity"],
                })
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_dembow_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create dembow clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": full_notes},
            timeout=TIMEOUTS["generate_dembow_clip"]
        )
        # Bug fix: the original reported resp's (successful) message when the
        # note-add step failed; report the failing response instead.
        if resp2.get("status") != "success":
            return _err(resp2.get("message", "Failed to create dembow clip"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "variation": variation,
            "bars": bars,
            "notes_added": len(full_notes),
        })
    except Exception as e:
        return _err(f"Error generating dembow clip: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def generate_bass_clip(ctx: Context, track_index: int, clip_index: int = 0,
                       bars: int = 4, root_notes: list = None, style: str = "standard") -> str:
    """Generate a bassline MIDI clip (T003).

    Creates a reggaeton-style bassline pattern, cycling through `root_notes`
    one root per bar (2.0 beats per bar in this project's grid).

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        root_notes: List of root note pitches (default [36, 36, 36, 36])
        style: Bass style (standard, melodic, staccato, slides)

    Returns:
        JSON with clip generation status.
    """
    if root_notes is None:
        root_notes = [36] * 4
    try:
        notes = []
        base_octave = 36  # fallback root (C2) if an empty root list is passed
        for bar in range(bars):
            root = root_notes[bar % len(root_notes)] if root_notes else base_octave
            bar_start = bar * 2.0
            if style == "standard":
                notes.extend([
                    {"pitch": root, "start_time": bar_start, "duration": 0.5, "velocity": 100},
                    {"pitch": root, "start_time": bar_start + 0.5, "duration": 0.5, "velocity": 90},
                    {"pitch": root, "start_time": bar_start + 1.0, "duration": 0.5, "velocity": 100},
                    {"pitch": root + 7, "start_time": bar_start + 1.5, "duration": 0.5, "velocity": 80},
                ])
            elif style == "melodic":
                notes.extend([
                    {"pitch": root, "start_time": bar_start, "duration": 0.75, "velocity": 100},
                    {"pitch": root + 4, "start_time": bar_start + 0.75, "duration": 0.25, "velocity": 80},
                    {"pitch": root + 7, "start_time": bar_start + 1.0, "duration": 0.5, "velocity": 90},
                    {"pitch": root, "start_time": bar_start + 1.5, "duration": 0.5, "velocity": 85},
                ])
            elif style == "staccato":
                notes.extend([
                    {"pitch": root, "start_time": bar_start, "duration": 0.125, "velocity": 110},
                    {"pitch": root, "start_time": bar_start + 0.5, "duration": 0.125, "velocity": 100},
                    {"pitch": root, "start_time": bar_start + 1.0, "duration": 0.125, "velocity": 110},
                    {"pitch": root, "start_time": bar_start + 1.5, "duration": 0.125, "velocity": 100},
                ])
            else:  # slides or default
                notes.extend([
                    {"pitch": root, "start_time": bar_start, "duration": 1.0, "velocity": 100},
                    {"pitch": root + 12, "start_time": bar_start + 1.0, "duration": 0.25, "velocity": 90},
                    {"pitch": root, "start_time": bar_start + 1.5, "duration": 0.5, "velocity": 80},
                ])
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_bass_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create bass clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_bass_clip"]
        )
        # Bug fix: on note-add failure the original surfaced resp's message
        # (the successful create call); use the failing response instead.
        if resp2.get("status") != "success":
            return _err(resp2.get("message", "Failed to create bass clip"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "style": style,
            "bars": bars,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating bass clip: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def generate_chords_clip(ctx: Context, track_index: int, clip_index: int = 0,
                         bars: int = 4, progression: str = "i-v-vi-iv", key: str = "Am") -> str:
    """Generate a chord progression MIDI clip (T004).

    Creates sustained triads for common reggaeton progressions.  NOTE: each
    chord occupies `bars // len(progression)` bars, so when `bars` is not a
    multiple of the progression length the trailing bars are left empty
    (matches the original behavior).

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        progression: Roman numeral progression (default "i-v-vi-iv")
        key: Musical key (default Am); a trailing "m" selects minor triads.
             Sharps/flats fall back to the natural root (e.g. "C#m" -> C).

    Returns:
        JSON with clip generation status.
    """
    try:
        progressions = {
            "i-v-vi-iv": [0, 7, 9, 5],
            "i-iv-v": [0, 5, 7],
            "i-vi-iv-v": [0, 9, 5, 7],
            "i-v-i-v": [0, 7, 0, 7],
            "i-iv-i-v": [0, 5, 0, 7],
        }
        offsets = progressions.get(progression, progressions["i-v-vi-iv"])
        # Tonic MIDI pitch per key letter (minor keys sit an octave framing
        # chosen to keep chords in a comfortable mid register).
        minor_roots = {"C": 48, "D": 50, "E": 52, "F": 53, "G": 55, "A": 45, "B": 47}
        major_roots = {"C": 60, "D": 62, "E": 64, "F": 65, "G": 67, "A": 57, "B": 59}
        is_minor = key.endswith("m")
        table = minor_roots if is_minor else major_roots
        base_note = table.get(key[:1], 48 if is_minor else 60)
        third = 3 if is_minor else 4  # minor vs major third
        notes = []
        chord_length = bars // len(offsets) if bars >= len(offsets) else 1
        for i, offset in enumerate(offsets):
            for bar in range(chord_length):
                root = base_note + offset
                start = i * chord_length * 2.0 + bar * 2.0
                for interval in (0, third, 7):
                    notes.append({
                        "pitch": root + interval,
                        "start_time": start,
                        "duration": 2.0,
                        "velocity": 70,
                    })
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_chords_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create chords clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_chords_clip"]
        )
        # Bug fix: report the failing add-notes response, not the create one.
        if resp2.get("status") != "success":
            return _err(resp2.get("message", "Failed to create chords clip"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "progression": progression,
            "key": key,
            "bars": bars,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating chords clip: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def generate_melody_clip(ctx: Context, track_index: int, clip_index: int = 0,
                         bars: int = 4, scale: str = "minor", density: str = "medium") -> str:
    """Generate a melodic line MIDI clip (T005).

    Creates a melody pattern for reggaeton by sampling a 16-step grid per
    bar.  The RNG is seeded with a fixed value so repeated calls with the
    same arguments produce the same melody (reproducible generations).

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target track
        clip_index: Index of the clip slot (default 0)
        bars: Number of bars (default 4)
        scale: Scale type (minor, major, harmonic_minor, pentatonic)
        density: Note density (sparse, medium, dense)

    Returns:
        JSON with clip generation status.
    """
    try:
        scales = {
            "minor": [60, 62, 63, 65, 67, 68, 70, 72],
            "major": [60, 62, 64, 65, 67, 69, 71, 72],
            "harmonic_minor": [60, 62, 63, 65, 67, 68, 71, 72],
            "pentatonic": [60, 62, 64, 67, 69, 72],
        }
        scale_notes = scales.get(scale, scales["minor"])
        density_ratios = {"sparse": 0.25, "medium": 0.5, "dense": 0.75}
        ratio = density_ratios.get(density, 0.5)
        import random
        random.seed(42)  # deterministic on purpose — same args, same melody
        notes = []
        sixteenth = 2.0 / 16
        for bar in range(bars):
            for step in range(16):
                if random.random() < ratio:
                    notes.append({
                        "pitch": random.choice(scale_notes),
                        "start_time": bar * 2.0 + step * sixteenth,
                        "duration": sixteenth * random.choice([1, 2, 4]),
                        "velocity": random.randint(70, 110),
                    })
        resp = _send_to_ableton(
            "create_clip",
            {"track_index": track_index, "clip_index": clip_index, "length": float(bars * 2)},
            timeout=TIMEOUTS["generate_melody_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create melody clip"))
        resp2 = _send_to_ableton(
            "add_notes_to_clip",
            {"track_index": track_index, "clip_index": clip_index, "notes": notes},
            timeout=TIMEOUTS["generate_melody_clip"]
        )
        # Bug fix: report the failing add-notes response, not the create one.
        if resp2.get("status") != "success":
            return _err(resp2.get("message", "Failed to create melody clip"))
        return _ok({
            "track_index": track_index,
            "clip_index": clip_index,
            "scale": scale,
            "density": density,
            "bars": bars,
            "notes_added": len(notes),
        })
    except Exception as e:
        return _err(f"Error generating melody clip: {str(e)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# Sample Management Tools (T006-T010)
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def load_samples_for_genre(ctx: Context, genre: str, key: str = "", bpm: float = 0) -> str:
    """Select and load samples for a genre (T008).

    This is an alias for select_samples_for_genre with additional auto-loading.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        genre: Genre to select samples for
        key: Musical key filter (optional; empty string disables the filter)
        bpm: BPM filter (optional; <= 0 disables the filter)

    Returns:
        JSON with selected samples info (drum kit slots plus capped lists of
        bass/synth/fx sample names).
    """
    try:
        # Fix: drop the unused SampleSelector import; only the factory is needed.
        from engines.sample_selector import get_selector
        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available. Check libreria/reggaeton path.")
        group = selector.select_for_genre(genre, key if key else None, bpm if bpm > 0 else None)
        result = {
            "genre": group.genre,
            "key": group.key,
            "bpm": group.bpm,
            "drums": {},
            "bass": [],
            "synths": [],
            "fx": [],
        }
        # Report each populated drum-kit slot by sample name.
        kit = group.drums
        for slot in ("kick", "snare", "clap", "hat_closed", "hat_open"):
            sample = getattr(kit, slot, None)
            if sample:
                result["drums"][slot] = sample.name
        # Cap the list sections so the response stays small.
        result["bass"] = [s.name for s in (group.bass or [])[:5]]
        result["synths"] = [s.name for s in (group.synths or [])[:5]]
        result["fx"] = [s.name for s in (group.fx or [])[:3]]
        return _ok(result)
    except ImportError:
        return _err("Sample selector engine not available.")
    except Exception as e:
        return _err(f"Error loading samples for genre: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def create_drum_kit(ctx: Context, track_index: int, kick_path: str = "",
                    snare_path: str = "", hat_path: str = "", clap_path: str = "") -> str:
    """Create a drum kit by loading samples into a Drum Rack (T009).

    Each provided sample path is mapped to its conventional GM pad note
    (kick=36, snare=38, hat=42, clap=39).  Empty paths are skipped; missing
    files and Remote Script failures are collected per pad.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the track containing the Drum Rack
        kick_path: Path to kick sample (optional)
        snare_path: Path to snare sample (optional)
        hat_path: Path to hi-hat sample (optional)
        clap_path: Path to clap sample (optional)

    Returns:
        JSON with kit creation status (loaded pads and per-pad errors).
    """
    try:
        pad_assignments = (
            (kick_path, 36),
            (snare_path, 38),
            (hat_path, 42),
            (clap_path, 39),
        )
        loaded = []
        errors = []
        for sample_path, pad_note in pad_assignments:
            if not sample_path:
                continue  # pad intentionally left empty
            if not os.path.isfile(sample_path):
                errors.append({"note": pad_note, "error": f"File not found: {sample_path}"})
                continue
            resp = _send_to_ableton(
                "load_sample_to_drum_rack",
                {"track_index": track_index, "sample_path": sample_path, "pad_note": pad_note},
                timeout=TIMEOUTS["create_drum_kit"]
            )
            if resp.get("status") == "success":
                loaded.append({"note": pad_note, "path": sample_path})
            else:
                errors.append({"note": pad_note, "error": resp.get("message", "unknown")})
        return _ok({
            "track_index": track_index,
            "samples_loaded": len(loaded),
            "loaded": loaded,
            "errors": errors,
        })
    except Exception as e:
        return _err(f"Error creating drum kit: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def build_track_from_samples(ctx: Context, track_type: str = "drums",
                             sample_role: str = "drums") -> str:
    """Build a complete track from library samples (T010).

    Creates a new audio track, names it after `track_type`, then loads up to
    four role-matching samples into consecutive clip slots.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_type: Type of track (drums, bass, melody, fx)
        sample_role: Sample role to filter by (drums, bass, synths, fx)

    Returns:
        JSON with track creation and sample loading status.
    """
    try:
        from engines.sample_selector import get_selector
        selector = get_selector()
        if selector is None:
            return _err("Sample selector not available.")

        # Create the track (index -1 appends at the end of the set).
        create_resp = _send_to_ableton(
            "create_audio_track",
            {"index": -1},
            timeout=TIMEOUTS["build_track_from_samples"]
        )
        if create_resp.get("status") != "success":
            return _err("Failed to create audio track")
        new_track = create_resp.get("track_index", -1)
        if new_track < 0:
            return _err("Invalid track index returned")

        # Name is best-effort; the result is intentionally ignored.
        _send_to_ableton(
            "set_track_name",
            {"track_index": new_track, "name": f"{track_type.title()} Track"},
            timeout=TIMEOUTS["build_track_from_samples"]
        )

        # Fill the first clip slots with up to four role-matching samples.
        candidates = selector.get_samples_by_role(sample_role)[:4]
        loaded = []
        for slot, sample in enumerate(candidates):
            slot_resp = _send_to_ableton(
                "load_sample_to_clip",
                {"track_index": new_track, "clip_index": slot, "sample_path": sample.path},
                timeout=TIMEOUTS["build_track_from_samples"]
            )
            if slot_resp.get("status") == "success":
                loaded.append({"index": slot, "sample": sample.name})

        return _ok({
            "track_type": track_type,
            "track_index": new_track,
            "samples_loaded": len(loaded),
            "samples": loaded,
        })
    except ImportError:
        return _err("Sample selector engine not available.")
    except Exception as e:
        return _err(f"Error building track from samples: {str(e)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# Configuration-Based Generators (T011-T015)
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def generate_full_song(ctx: Context, bpm: float = 95, key: str = "Am",
                       style: str = "classic", structure: str = "standard") -> str:
    """Generate a complete song with multiple elements (T011).

    This is an enhanced version that creates drums, bass, chords, and melody
    by delegating to the production workflow engine.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        bpm: Tempo in BPM (default 95)
        key: Musical key (default Am)
        style: Song style (classic, modern, perreo, moombahton)
        structure: Song structure (standard, verse-chorus, full)

    Returns:
        JSON with song generation summary.
    """
    try:
        from engines.production_workflow import ProductionWorkflow
        song = ProductionWorkflow().generate_song(
            genre="reggaeton",
            bpm=bpm,
            key=key,
            style=style,
            structure=structure
        )
        summary = {
            "song_type": "full",
            "bpm": bpm,
            "key": key,
            "style": style,
            "structure": structure,
            "tracks_created": song.get("tracks", []),
            "clips_generated": song.get("clips", []),
            "duration_bars": song.get("duration_bars", 128),
        }
        return _ok(summary)
    except ImportError:
        return _err("Production workflow engine not available.")
    except Exception as e:
        return _err(f"Error generating full song: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def generate_track_from_config(ctx: Context, track_config_json: str) -> str:
    """Generate a track from a JSON configuration (T012).

    Flexible track generation using a configuration object: creates a MIDI
    track, names it, then dispatches to the matching clip generator.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_config_json: JSON string with track configuration
            Example: '{"type": "drums", "pattern": "dembow", "bars": 8}'

    Returns:
        JSON with track generation status.
    """
    try:
        import json as json_lib
        config = json_lib.loads(track_config_json)
        track_type = config.get("type", "drums")

        resp = _send_to_ableton(
            "create_midi_track",
            {"index": -1},
            timeout=TIMEOUTS["generate_track_from_config"]
        )
        if resp.get("status") != "success":
            return _err("Failed to create MIDI track")
        track_index = resp.get("track_index", -1)

        # Naming is best-effort; failures here are not fatal.
        _send_to_ableton(
            "set_track_name",
            {"track_index": track_index, "name": config.get("name", f"{track_type.title()} Track")},
            timeout=TIMEOUTS["generate_track_from_config"]
        )

        bars = config.get("bars", 4)
        if track_type == "drums":
            # Only the dembow pattern has a dedicated generator; other drum
            # patterns fall through to the generic "created" response below.
            if config.get("pattern", "dembow") == "dembow":
                return generate_dembow_clip(ctx, track_index, 0, bars, "standard")
        elif track_type == "bass":
            return generate_bass_clip(ctx, track_index, 0, bars,
                                      config.get("root_notes", [36]),
                                      config.get("style", "standard"))
        elif track_type == "chords":
            return generate_chords_clip(ctx, track_index, 0, bars,
                                        config.get("progression", "i-v-vi-iv"),
                                        config.get("key", "Am"))
        elif track_type == "melody":
            return generate_melody_clip(ctx, track_index, 0, bars,
                                        config.get("scale", "minor"),
                                        config.get("density", "medium"))

        return _ok({
            "track_type": track_type,
            "track_index": track_index,
            "config": config,
            "status": "created",
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error generating track from config: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def generate_section(ctx: Context, section_config_json: str, start_bar: int = 0) -> str:
    """Generate a song section from JSON config (T013).

    Creates a section (verse, chorus, intro, etc.) by generating one track
    per requested element.  `start_bar` is currently only echoed back in the
    response; track placement happens at slot 0 of each new track.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        section_config_json: JSON string with section configuration
            Example: '{"type": "verse", "bars": 16, "elements": ["drums", "bass"]}'
        start_bar: Starting bar position in the song

    Returns:
        JSON with section generation status.
    """
    try:
        import json as json_lib
        section = json_lib.loads(section_config_json)
        section_type = section.get("type", "verse")
        bars = section.get("bars", 8)
        elements = section.get("elements", ["drums"])

        tracks_created = []
        for element in elements:
            track_cfg = {
                "type": element,
                "bars": bars,
                "name": f"{section_type.title()} {element.title()}",
            }
            if element == "drums":
                track_cfg["pattern"] = "dembow"  # default drum flavor for sections
            outcome = generate_track_from_config(ctx, json_lib.dumps(track_cfg))
            tracks_created.append({"element": element, "result": outcome})

        return _ok({
            "section_type": section_type,
            "start_bar": start_bar,
            "bars": bars,
            "elements": elements,
            "tracks_created": len(tracks_created),
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error generating section: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def apply_human_feel(ctx: Context, track_index: int, intensity: float = 0.5) -> str:
    """Apply humanization feel to a MIDI track (T014).

    Adds velocity and timing variations for a more natural feel via the
    Remote Script's humanize_track command.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the track to humanize
        intensity: Humanization intensity 0.0-1.0 (default 0.5)

    Returns:
        JSON with humanization status.
    """
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    try:
        resp = _send_to_ableton(
            "humanize_track",
            {"track_index": track_index, "intensity": intensity},
            timeout=TIMEOUTS["apply_human_feel"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to apply human feel"))
        return _ok({
            "track_index": track_index,
            "intensity": intensity,
            "notes_affected": resp.get("notes_affected", 0),
            "velocity_variation": resp.get("velocity_variation", 0),
            "timing_variation": resp.get("timing_variation", 0),
        })
    except Exception as e:
        return _err(f"Error applying human feel: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def add_percussion_fills(ctx: Context, track_index: int, positions: list = None) -> str:
    """Add percussion fills at specified positions (T015).

    Inserts a short snare/hat/kick fill into clip slot 0 at each requested
    bar position.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the percussion track
        positions: List of bar positions for fills (default [7, 15, 23, 31])

    Returns:
        JSON with fills addition status.
    """
    if positions is None:
        positions = [7, 15, 23, 31]
    try:
        # One-bar fill template (times relative to the start of the fill bar).
        fill_pattern = [
            {"pitch": 38, "start_time": 0.0, "duration": 0.125, "velocity": 110},
            {"pitch": 42, "start_time": 0.25, "duration": 0.125, "velocity": 100},
            {"pitch": 38, "start_time": 0.5, "duration": 0.125, "velocity": 110},
            {"pitch": 36, "start_time": 0.75, "duration": 0.125, "velocity": 120},
        ]
        fills_added = []
        for pos in positions:
            shifted = [
                {
                    "pitch": note["pitch"],
                    "start_time": note["start_time"] + pos * 2.0,
                    "duration": note["duration"],
                    "velocity": note["velocity"],
                }
                for note in fill_pattern
            ]
            resp = _send_to_ableton(
                "add_notes_to_clip",
                {"track_index": track_index, "clip_index": 0, "notes": shifted},
                timeout=TIMEOUTS["add_percussion_fills"]
            )
            if resp.get("status") == "success":
                fills_added.append({"position": pos, "notes": len(shifted)})
        return _ok({
            "track_index": track_index,
            "fills_added": len(fills_added),
            "positions": positions,
            "details": fills_added,
        })
    except Exception as e:
        return _err(f"Error adding percussion fills: {str(e)}")
|
||
|
||
|
||
# ==================================================================
|
||
# FASE 6: PHASE 2 - ARRANGEMENT & AUTOMATION (T021-T026)
|
||
# ==================================================================
|
||
|
||
@mcp.tool()
def build_arrangement_structure(ctx: Context, song_config: str) -> str:
    """Build a complete arrangement structure (T021).

    Creates song sections one after another, tracking the running bar
    position so each section starts where the previous one ended.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        song_config: JSON string with song configuration
            Example: '{"sections": [{"type": "intro", "bars": 8}, {"type": "verse", "bars": 16}]}'

    Returns:
        JSON with arrangement structure status.
    """
    try:
        import json as json_lib
        config = json_lib.loads(song_config)

        cursor = 0  # running bar position
        created_sections = []
        for spec in config.get("sections", []):
            kind = spec.get("type", "verse")
            length = spec.get("bars", 8)
            payload = json_lib.dumps({
                "type": kind,
                "bars": length,
                "elements": spec.get("elements", ["drums", "bass"]),
            })
            outcome = generate_section(ctx, payload, cursor)
            created_sections.append({
                "type": kind,
                "start_bar": cursor,
                "bars": length,
                "result": outcome,
            })
            cursor += length

        return _ok({
            "total_sections": len(created_sections),
            "total_bars": cursor,
            "sections": created_sections,
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error building arrangement structure: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def create_arrangement_midi_clip(ctx: Context, track_index: int, start_time: float = 0.0,
                                 length: float = 4.0, notes: list = None) -> str:
    """Create a MIDI clip in Arrangement View (T023).

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target track
        start_time: Start position in bars
        length: Clip length in bars
        notes: List of MIDI notes to add

    Returns:
        JSON with clip creation status.
    """
    notes = notes if notes is not None else []
    try:
        payload = {
            "track_index": track_index,
            "start_time": start_time,
            "length": length,
            "notes": notes,
        }
        resp = _send_to_ableton(
            "create_arrangement_midi_clip",
            payload,
            timeout=TIMEOUTS["create_arrangement_midi_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create arrangement MIDI clip"))
        return _ok({
            "track_index": track_index,
            "start_time": start_time,
            "length": length,
            "notes_added": len(notes),
            "view": "Arrangement",
        })
    except Exception as e:
        return _err(f"Error creating arrangement MIDI clip: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def create_arrangement_audio_clip(ctx: Context, track_index: int, sample_path: str,
                                  start_time: float = 0.0, length: float = 4.0) -> str:
    """Create an audio clip in Arrangement View (T024).

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target audio track
        sample_path: Absolute path to the audio file
        start_time: Start position in bars
        length: Clip length in bars

    Returns:
        JSON with clip creation status.
    """
    # Validate the sample up front so the Remote Script never sees a bad path.
    if not os.path.isfile(sample_path):
        return _err(f"Sample not found: {sample_path}")
    try:
        payload = {
            "track_index": track_index,
            "sample_path": sample_path,
            "start_time": start_time,
            "length": length,
        }
        resp = _send_to_ableton(
            "create_arrangement_audio_clip",
            payload,
            timeout=TIMEOUTS["create_arrangement_audio_clip"]
        )
        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to create arrangement audio clip"))
        return _ok({
            "track_index": track_index,
            "sample_path": sample_path,
            "start_time": start_time,
            "length": length,
            "view": "Arrangement",
        })
    except Exception as e:
        return _err(f"Error creating arrangement audio clip: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def fill_arrangement_with_song(ctx: Context, song_config: str) -> str:
    """Fill the entire arrangement with a complete song (T025).

    Sets the project tempo, then builds a fixed intro/verse/chorus/outro
    structure.  The `duration` field from the config is only echoed back;
    the hard-coded section list defines the actual length.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        song_config: JSON string with complete song configuration
            Example: '{"bpm": 95, "key": "Am", "style": "classic", "duration": 128}'

    Returns:
        JSON with song arrangement status.
    """
    try:
        import json as json_lib
        config = json_lib.loads(song_config)
        bpm = config.get("bpm", 95)
        key = config.get("key", "Am")
        style = config.get("style", "classic")
        duration = config.get("duration", 128)

        tempo_resp = _send_to_ableton(
            "set_tempo",
            {"tempo": bpm},
            timeout=10.0
        )
        if tempo_resp.get("status") != "success":
            return _err("Failed to set tempo")

        structure_config = json_lib.dumps({
            "sections": [
                {"type": "intro", "bars": 8, "elements": ["drums", "bass"]},
                {"type": "verse", "bars": 16, "elements": ["drums", "bass", "chords"]},
                {"type": "chorus", "bars": 16, "elements": ["drums", "bass", "chords", "melody"]},
                {"type": "verse", "bars": 16, "elements": ["drums", "bass", "chords"]},
                {"type": "chorus", "bars": 16, "elements": ["drums", "bass", "chords", "melody"]},
                {"type": "outro", "bars": 8, "elements": ["drums", "bass"]},
            ]
        })
        arrangement = build_arrangement_structure(ctx, structure_config)

        return _ok({
            "bpm": bpm,
            "key": key,
            "style": style,
            "duration_bars": duration,
            "arrangement_result": arrangement,
        })
    except json_lib.JSONDecodeError:
        return _err("Invalid JSON configuration")
    except Exception as e:
        return _err(f"Error filling arrangement: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def automate_filter(ctx: Context, track_index: int, start_bar: float = 0.0,
                    end_bar: float = 8.0, start_freq: float = 200.0,
                    end_freq: float = 20000.0) -> str:
    """Automate a filter sweep on a track (T026).

    Creates automation for filter frequency from start to end by proxying
    the command to the Remote Script.

    Args:
        ctx: FastMCP request context (unused, required by tool signature)
        track_index: Index of the target track
        start_bar: Start bar for automation
        end_bar: End bar for automation
        start_freq: Starting filter frequency in Hz
        end_freq: Ending filter frequency in Hz

    Returns:
        JSON with automation creation status.
    """
    # The proxy expects the same fields as both the command payload and the
    # fallback defaults; build them once and hand over independent copies.
    payload = {
        "track_index": track_index,
        "start_bar": start_bar,
        "end_bar": end_bar,
        "start_freq": start_freq,
        "end_freq": end_freq,
    }
    return _proxy_ableton_command(
        "automate_filter",
        payload,
        timeout=TIMEOUTS["automate_filter"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
# ==================================================================
# PHASE 2.5: FX CREATOR TOOLS (T031-T035) - exposing arrangement_engine
# ==================================================================

@mcp.tool()
def create_riser(ctx: Context, track_index: int, start_bar: int,
                 duration: int = 8, intensity: float = 0.8,
                 pitch_min: int = 36, pitch_max: int = 84) -> str:
    """Create a riser/buildup effect (T031).

    Generates a pre-drop riser with ascending pitch and tension.
    Perfect for build-ups before choruses or drops.

    Args:
        track_index: Index of the target track
        start_bar: Start bar for the riser
        duration: Duration in bars (default 8)
        intensity: Intensity 0.0-1.0 (default 0.8)
        pitch_min: Minimum MIDI pitch (default 36 = C2)
        pitch_max: Maximum MIDI pitch (default 84 = C6)

    Returns:
        JSON with riser creation status and clip info.
    """
    # Validate locally before the round-trip, matching variate_loop's style.
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    if pitch_min > pitch_max:
        return _err(f"Invalid pitch range: [{pitch_min}, {pitch_max}]. pitch_min must be <= pitch_max.")
    return _proxy_ableton_command(
        "create_riser",
        {
            "track_index": track_index,
            "start_bar": start_bar,
            "duration": duration,
            "intensity": intensity,
            "pitch_range": [pitch_min, pitch_max],
        },
        timeout=30.0,
        defaults={
            "track_index": track_index,
            "start_bar": start_bar,
            "duration": duration,
            "intensity": intensity,
        },
    )


@mcp.tool()
def create_downlifter(ctx: Context, track_index: int, start_bar: int,
                      duration: int = 4, intensity: float = 0.7,
                      pitch_start: int = 72, pitch_end: int = 36) -> str:
    """Create a downlifter effect (T032).

    Generates a post-drop downlifter with descending pitch.
    Perfect for energy release after drops or impacts.

    Args:
        track_index: Index of the target track
        start_bar: Start bar for the downlifter
        duration: Duration in bars (default 4)
        intensity: Intensity 0.0-1.0 (default 0.7)
        pitch_start: Starting MIDI pitch (default 72 = C5)
        pitch_end: Ending MIDI pitch (default 36 = C2)

    Returns:
        JSON with downlifter creation status and clip info.
    """
    # Validate locally before the round-trip, matching variate_loop's style.
    # (pitch_start > pitch_end is expected here: the sweep descends.)
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    return _proxy_ableton_command(
        "create_downlifter",
        {
            "track_index": track_index,
            "start_bar": start_bar,
            "duration": duration,
            "intensity": intensity,
            "pitch_range": [pitch_start, pitch_end],
        },
        timeout=30.0,
        defaults={
            "track_index": track_index,
            "start_bar": start_bar,
            "duration": duration,
            "intensity": intensity,
        },
    )


@mcp.tool()
def create_impact(ctx: Context, track_index: int, position: float,
                  intensity: float = 1.0, impact_type: str = "hit") -> str:
    """Create an impact FX (T033).

    Generates impact effects (hit, crash, sub drop, noise).
    Perfect for emphasizing drops, transitions, or beats.

    Args:
        track_index: Index of the target track
        position: Position in bars (int) or beats (float)
        intensity: Intensity 0.0-1.0 (default 1.0)
        impact_type: Type of impact - "hit", "crash", "sub_drop", "noise"

    Returns:
        JSON with impact creation status and clip info.
    """
    # Validate against the documented set before the round-trip, matching
    # the local-validation style used by variate_loop.
    valid_types = ("hit", "crash", "sub_drop", "noise")
    if impact_type not in valid_types:
        return _err(f"Invalid impact_type: {impact_type}. Must be one of {', '.join(valid_types)}.")
    if not 0.0 <= intensity <= 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    return _proxy_ableton_command(
        "create_impact",
        {
            "track_index": track_index,
            "position": position,
            "intensity": intensity,
            "impact_type": impact_type,
        },
        timeout=30.0,
        defaults={
            "track_index": track_index,
            "position": position,
            "intensity": intensity,
            "impact_type": impact_type,
        },
    )


@mcp.tool()
def create_silence(ctx: Context, track_index: int, start_bar: int,
                   duration: int = 1) -> str:
    """Create silence/break effect (T034).

    Inserts a moment of silence for dramatic effect -- useful for
    building tension right before a drop.

    Args:
        track_index: Index of the target track (for context)
        start_bar: Start bar for the silence
        duration: Duration in bars (default 1)

    Returns:
        JSON with silence creation status.
    """
    payload = {
        "track_index": track_index,
        "start_bar": start_bar,
        "duration": duration,
    }
    return _proxy_ableton_command(
        "create_silence",
        payload,
        timeout=30.0,
        defaults=dict(payload),
    )


@mcp.tool()
def create_fx_section(ctx: Context, section_type: str, start_bar: int,
                      duration: int = 8, track_indices: Optional[list] = None) -> str:
    """Create complete FX section (T035).

    Generates a complete FX section with risers, impacts, and transitions.

    Args:
        section_type: Type - "pre_drop", "post_drop", "transition", "build"
        start_bar: Start bar for the section
        duration: Duration in bars (default 8)
        track_indices: List of track indices to apply FX (optional;
            None or an empty list both mean "no explicit tracks")

    Returns:
        JSON with FX section creation status.
    """
    # ``Optional[list]`` replaces the incorrect ``list = None`` annotation;
    # the runtime behavior (None coerced to []) is unchanged.
    return _proxy_ableton_command(
        "create_fx_section",
        {
            "section_type": section_type,
            "start_bar": start_bar,
            "duration": duration,
            "track_indices": track_indices or [],
        },
        timeout=30.0,
        defaults={
            "section_type": section_type,
            "start_bar": start_bar,
            "duration": duration,
        },
    )


# ==================================================================
# PHASE 3: MUSICAL INTELLIGENCE (T041-T060)
# ==================================================================

@mcp.tool()
def analyze_project_key(ctx: Context) -> str:
    """Detect the predominant key of the current project (T041)."""
    return _proxy_ableton_command(
        "analyze_project_key",
        timeout=TIMEOUTS["analyze_project_key"],
    )


@mcp.tool()
def harmonize_track(ctx: Context, track_index: int, progression: str = "I-V-vi-IV") -> str:
    """Harmonize a track with a chord progression (T042).

    Args:
        track_index: Index of the track to harmonize
        progression: Chord progression (e.g. "I-V-vi-IV", "ii-V-I", "I-IV-V")
    """
    payload = {"track_index": track_index, "progression": progression}
    return _proxy_ableton_command(
        "harmonize_track",
        payload,
        timeout=TIMEOUTS["harmonize_track"],
        defaults=dict(payload),
    )


@mcp.tool()
def generate_counter_melody(ctx: Context, main_melody_track: int) -> str:
    """Generate a counter-melody that complements the main melody (T043).

    Args:
        main_melody_track: Index of the track holding the main melody
    """
    payload = {"main_melody_track": main_melody_track}
    return _proxy_ableton_command(
        "generate_counter_melody",
        payload,
        timeout=TIMEOUTS["generate_counter_melody"],
        defaults=dict(payload),
    )


@mcp.tool()
def detect_energy_curve(ctx: Context) -> str:
    """Analyze the per-section energy curve of the project (T044)."""
    return _proxy_ableton_command(
        "detect_energy_curve",
        timeout=TIMEOUTS["detect_energy_curve"],
    )


@mcp.tool()
def balance_sections(ctx: Context) -> str:
    """Automatically balance the energy across sections (T045)."""
    return _proxy_ableton_command(
        "balance_sections",
        timeout=TIMEOUTS["balance_sections"],
    )


@mcp.tool()
def variate_loop(ctx: Context, track_index: int, intensity: float = 0.5) -> str:
    """Create variations of a loop to avoid repetitiveness (T046).

    Args:
        track_index: Index of the track holding the loop
        intensity: Variation intensity (0.0-1.0)
    """
    # Guard clause: reject out-of-range intensity before the round-trip.
    if intensity < 0.0 or intensity > 1.0:
        return _err(f"Invalid intensity: {intensity}. Must be 0.0-1.0.")
    payload = {"track_index": track_index, "intensity": intensity}
    return _proxy_ableton_command(
        "variate_loop",
        payload,
        timeout=TIMEOUTS["variate_loop"],
        defaults=dict(payload),
    )


@mcp.tool()
def add_call_and_response(ctx: Context, phrase_track: int, response_length: int = 2) -> str:
    """Generate a musical response to an existing phrase (T047).

    Args:
        phrase_track: Index of the track holding the original phrase
        response_length: Length of the response in bars
    """
    payload = {"phrase_track": phrase_track, "response_length": response_length}
    return _proxy_ableton_command(
        "add_call_and_response",
        payload,
        timeout=TIMEOUTS["add_call_and_response"],
        defaults=dict(payload),
    )


@mcp.tool()
def generate_breakdown(ctx: Context, start_bar: int, duration: int = 8) -> str:
    """Generate a breakdown/rest section (T048).

    Args:
        start_bar: Bar at which the breakdown starts
        duration: Duration in bars (default 8)
    """
    payload = {"start_bar": start_bar, "duration": duration}
    return _proxy_ableton_command(
        "generate_breakdown",
        payload,
        timeout=TIMEOUTS["generate_breakdown"],
        defaults=dict(payload),
    )


@mcp.tool()
def generate_drop_variation(ctx: Context, original_drop_bar: int, variation_type: str = "intense") -> str:
    """Generate a variation of an existing drop (T049).

    Args:
        original_drop_bar: Bar where the original drop lives
        variation_type: Variation type ("intense", "minimal", "double", "fill")
    """
    payload = {"original_drop_bar": original_drop_bar, "variation_type": variation_type}
    return _proxy_ableton_command(
        "generate_drop_variation",
        payload,
        timeout=TIMEOUTS["generate_drop_variation"],
        defaults=dict(payload),
    )


@mcp.tool()
def create_outro(ctx: Context, fade_duration: int = 8) -> str:
    """Create an outro with an automatic fade-out (T050).

    Args:
        fade_duration: Fade length in bars
    """
    payload = {"fade_duration": fade_duration}
    return _proxy_ableton_command(
        "create_outro",
        payload,
        timeout=TIMEOUTS["create_outro"],
        defaults=dict(payload),
    )


# ==================================================================
# PHASE 4: WORKFLOW AND PRODUCTION (T061-T080)
# ==================================================================

@mcp.tool()
def load_preset(ctx: Context, preset_name: str) -> str:
    """Load a preset into the current project (T062).

    Args:
        preset_name: Name of the preset to load
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        result = WorkflowEngine().load_preset(preset_name)
        if not result.get("success"):
            return _err(result.get("message", "Failed to load preset"))
        return _ok({
            "preset_name": preset_name,
            "tracks_loaded": result.get("tracks_loaded", 0),
            "devices_loaded": result.get("devices_loaded", 0),
            "samples_loaded": result.get("samples_loaded", []),
        })
    except Exception as e:
        return _err(f"Error loading preset: {str(e)}")


@mcp.tool()
def save_as_preset(ctx: Context, name: str, description: str = "") -> str:
    """Save the current project as a preset (T063).

    Args:
        name: Preset name
        description: Optional description
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        result = WorkflowEngine().save_as_preset(name, description)
        if not result.get("success"):
            return _err(result.get("message", "Failed to save preset"))
        return _ok({
            "preset_name": name,
            "description": description,
            "saved_path": result.get("path"),
            "tracks_included": result.get("tracks_included", 0),
        })
    except Exception as e:
        return _err(f"Error saving preset: {str(e)}")


@mcp.tool()
def list_presets(ctx: Context) -> str:
    """List every available preset (T064)."""
    try:
        from engines.workflow_engine import WorkflowEngine

        result = WorkflowEngine().list_presets()
        return _ok({
            "presets": result.get("presets", []),
            "total_count": result.get("count", 0),
            "categories": result.get("categories", []),
        })
    except Exception as e:
        return _err(f"Error listing presets: {str(e)}")


@mcp.tool()
def create_custom_preset(ctx: Context, name: str, description: str = "") -> str:
    """Create a custom preset from scratch (T065).

    Args:
        name: Preset name
        description: Preset description
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        result = WorkflowEngine().create_custom_preset(name, description)
        if not result.get("success"):
            return _err(result.get("message", "Failed to create preset"))
        return _ok({
            "preset_name": name,
            "description": description,
            "template_created": True,
            "base_tracks": result.get("base_tracks", []),
        })
    except Exception as e:
        return _err(f"Error creating custom preset: {str(e)}")


@mcp.tool()
def render_stems(ctx: Context, output_dir: str) -> str:
    """Render individual stems for external mixing (T066).

    Args:
        output_dir: Output directory for the stems
    """
    payload = {"output_dir": output_dir}
    return _proxy_ableton_command(
        "render_stems",
        payload,
        timeout=TIMEOUTS["render_stems"],
        defaults=dict(payload),
    )


@mcp.tool()
def render_full_mix(ctx: Context, output_path: str) -> str:
    """Render the complete mastered mix (T067).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "render_full_mix",
        payload,
        timeout=TIMEOUTS["render_full_mix"],
        defaults=dict(payload),
    )


@mcp.tool()
def render_instrumental(ctx: Context, output_path: str) -> str:
    """Render an instrumental version (vocal tracks excluded) (T068).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "render_instrumental",
        payload,
        timeout=TIMEOUTS["render_instrumental"],
        defaults=dict(payload),
    )


@mcp.tool()
def full_quality_check(ctx: Context) -> str:
    """Run the complete project quality check (T071)."""
    return _proxy_ableton_command(
        "full_quality_check",
        timeout=TIMEOUTS["full_quality_check"],
    )


@mcp.tool()
def fix_quality_issues(ctx: Context, issues: Optional[list] = None) -> str:
    """Automatically fix the detected quality issues (T072).

    Args:
        issues: Specific issues to fix (default: all detected issues)
    """
    # ``Optional[list]`` replaces the incorrect ``list = None`` annotation;
    # the None -> [] coercion below is unchanged.
    if issues is None:
        issues = []
    return _proxy_ableton_command(
        "fix_quality_issues",
        {"issues": issues},
        timeout=TIMEOUTS["fix_quality_issues"],
        defaults={"issues": issues},
    )


@mcp.tool()
def duplicate_project(ctx: Context, new_name: str) -> str:
    """Duplicate the current project under a new name (T076).

    Args:
        new_name: Name for the duplicated project
    """
    payload = {"new_name": new_name}
    return _proxy_ableton_command(
        "duplicate_project",
        payload,
        timeout=TIMEOUTS["duplicate_project"],
        defaults=dict(payload),
    )


@mcp.tool()
def create_radio_edit(ctx: Context, output_path: str) -> str:
    """Create a radio-edit version (short, without long intros) (T078).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "create_radio_edit",
        payload,
        timeout=TIMEOUTS["create_radio_edit"],
        defaults=dict(payload),
    )


@mcp.tool()
def create_dj_edit(ctx: Context, output_path: str) -> str:
    """Create a DJ-edit version (extended intro/outro, cue points) (T079).

    Args:
        output_path: Path of the output file
    """
    payload = {"output_path": output_path}
    return _proxy_ableton_command(
        "create_dj_edit",
        payload,
        timeout=TIMEOUTS["create_dj_edit"],
        defaults=dict(payload),
    )


# ==================================================================
# PHASE 5: FINAL INTEGRATION (T081-T100)
# ==================================================================

# Module-level tool catalog: the dict is large and fully static, so build it
# once at import time instead of rebuilding it on every help() call.
_TOOLS_DB = {
    # Info
    "get_session_info": {"description": "Obtiene informacion completa de la sesion actual de Ableton Live", "category": "Info", "params": [], "example": "get_session_info()"},
    "get_tracks": {"description": "Obtiene la lista de todas las pistas del proyecto", "category": "Info", "params": [], "example": "get_tracks()"},
    "get_scenes": {"description": "Obtiene la lista de todas las escenas en Session View", "category": "Info", "params": [], "example": "get_scenes()"},
    "get_master_info": {"description": "Obtiene informacion de la pista master", "category": "Info", "params": [], "example": "get_master_info()"},
    "health_check": {"description": "Verificacion completa del sistema (5 chequeos, score 0-5). EJECUTAR PRIMERO", "category": "Info", "params": [], "example": "health_check()"},
    # Transport
    "start_playback": {"description": "Inicia la reproduccion", "category": "Transport", "params": [], "example": "start_playback()"},
    "stop_playback": {"description": "Detiene la reproduccion", "category": "Transport", "params": [], "example": "stop_playback()"},
    "toggle_playback": {"description": "Alterna reproduccion/parada", "category": "Transport", "params": [], "example": "toggle_playback()"},
    "stop_all_clips": {"description": "Detiene todos los clips en Session View", "category": "Transport", "params": [], "example": "stop_all_clips()"},
    # Settings
    "set_tempo": {"description": "Establece el tempo del proyecto en BPM", "category": "Settings", "params": [{"name": "tempo", "type": "float", "range": "20-300"}], "example": "set_tempo(tempo=95)"},
    "set_time_signature": {"description": "Establece la firma de tiempo", "category": "Settings", "params": [{"name": "numerator", "type": "int", "default": 4}, {"name": "denominator", "type": "int", "default": 4}], "example": "set_time_signature(numerator=4, denominator=4)"},
    "set_metronome": {"description": "Activa o desactiva el metronomo", "category": "Settings", "params": [{"name": "enabled", "type": "bool"}], "example": "set_metronome(enabled=True)"},
    # Tracks
    "create_midi_track": {"description": "Crea una nueva pista MIDI", "category": "Tracks", "params": [{"name": "index", "type": "int", "default": -1}], "example": "create_midi_track(index=-1)"},
    "create_audio_track": {"description": "Crea una nueva pista de audio", "category": "Tracks", "params": [{"name": "index", "type": "int", "default": -1}], "example": "create_audio_track(index=-1)"},
    "set_track_name": {"description": "Establece el nombre de una pista", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "name", "type": "str"}], "example": "set_track_name(track_index=0, name='Drums')"},
    "set_track_volume": {"description": "Establece el volumen de una pista (0.0-1.0)", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "volume", "type": "float", "range": "0.0-1.0"}], "example": "set_track_volume(track_index=0, volume=0.8)"},
    "set_track_pan": {"description": "Establece el paneo de una pista (-1.0 a 1.0)", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "pan", "type": "float", "range": "-1.0 a 1.0"}], "example": "set_track_pan(track_index=0, pan=0.0)"},
    "set_track_mute": {"description": "Silencia o reactiva una pista", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "mute", "type": "bool"}], "example": "set_track_mute(track_index=0, mute=True)"},
    "set_track_solo": {"description": "Activa o desactiva solo en una pista", "category": "Tracks", "params": [{"name": "track_index", "type": "int"}, {"name": "solo", "type": "bool"}], "example": "set_track_solo(track_index=0, solo=True)"},
    "set_master_volume": {"description": "Establece el volumen master (0.0-1.0)", "category": "Tracks", "params": [{"name": "volume", "type": "float", "range": "0.0-1.0"}], "example": "set_master_volume(volume=0.8)"},
    # Clips
    "create_clip": {"description": "Crea un clip MIDI en Session View", "category": "Clips", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "length", "type": "float", "default": 4.0}], "example": "create_clip(track_index=0, clip_index=0, length=4.0)"},
    "add_notes_to_clip": {"description": "Aniade notas MIDI a un clip", "category": "Clips", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "notes", "type": "list"}], "example": "add_notes_to_clip(track_index=0, clip_index=0, notes=[{'pitch':36,'start_time':0.0,'duration':0.25,'velocity':100}])"},
    "fire_clip": {"description": "Dispara un clip en Session View", "category": "Clips", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}], "example": "fire_clip(track_index=0, clip_index=0)"},
    "fire_scene": {"description": "Dispara una escena completa", "category": "Clips", "params": [{"name": "scene_index", "type": "int"}], "example": "fire_scene(scene_index=0)"},
    "set_scene_name": {"description": "Establece el nombre de una escena", "category": "Clips", "params": [{"name": "scene_index", "type": "int"}, {"name": "name", "type": "str"}], "example": "set_scene_name(scene_index=0, name='Verse')"},
    "create_scene": {"description": "Crea una nueva escena", "category": "Clips", "params": [{"name": "index", "type": "int", "default": -1}], "example": "create_scene(index=-1)"},
    # Samples
    "analyze_library": {"description": "Analiza todos los samples en la libreria de reggaeton", "category": "Samples", "params": [{"name": "force_reanalyze", "type": "bool", "default": False}], "example": "analyze_library(force_reanalyze=False)"},
    "get_library_stats": {"description": "Obtiene estadisticas de la libreria analizada", "category": "Samples", "params": [], "example": "get_library_stats()"},
    "get_similar_samples": {"description": "Encuentra samples similares usando embeddings", "category": "Samples", "params": [{"name": "sample_path", "type": "str"}, {"name": "top_n", "type": "int", "default": 10}], "example": "get_similar_samples(sample_path='...', top_n=10)"},
    "find_samples_like_audio": {"description": "Encuentra samples similares a un audio externo", "category": "Samples", "params": [{"name": "audio_path", "type": "str"}, {"name": "top_n", "type": "int", "default": 20}, {"name": "role", "type": "str", "optional": True}], "example": "find_samples_like_audio(audio_path='...', top_n=20)"},
    "get_user_sound_profile": {"description": "Obtiene el perfil de sonido del usuario", "category": "Samples", "params": [], "example": "get_user_sound_profile()"},
    "get_recommended_samples": {"description": "Obtiene samples recomendados para un rol", "category": "Samples", "params": [{"name": "role", "type": "str", "optional": True}, {"name": "count", "type": "int", "default": 5}], "example": "get_recommended_samples(role='kick', count=5)"},
    "compare_two_samples": {"description": "Compara dos samples y devuelve similitud", "category": "Samples", "params": [{"name": "path1", "type": "str"}, {"name": "path2", "type": "str"}], "example": "compare_two_samples(path1='...', path2='...')"},
    "browse_library": {"description": "Navega la libreria con filtros", "category": "Samples", "params": [{"name": "pack", "type": "str", "optional": True}, {"name": "role", "type": "str", "optional": True}, {"name": "bpm_min", "type": "float", "default": 0}, {"name": "bpm_max", "type": "float", "default": 0}, {"name": "key", "type": "str", "optional": True}], "example": "browse_library(role='kick', bpm_min=90, bpm_max=100)"},
    # Mixing
    "create_bus_track": {"description": "Crea un grupo (bus) para mezcla", "category": "Mixing", "params": [{"name": "bus_type", "type": "str", "default": "Group"}], "example": "create_bus_track(bus_type='Drums')"},
    "route_track_to_bus": {"description": "Rutea una pista a un bus/grupo", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "bus_name", "type": "str"}], "example": "route_track_to_bus(track_index=0, bus_name='Drums')"},
    "create_return_track": {"description": "Crea una pista de retorno con efecto", "category": "Mixing", "params": [{"name": "effect_type", "type": "str", "default": "Reverb"}], "example": "create_return_track(effect_type='Reverb')"},
    "set_track_send": {"description": "Configura envio a pista de retorno (0.0-1.0)", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "return_index", "type": "int"}, {"name": "amount", "type": "float", "range": "0.0-1.0"}], "example": "set_track_send(track_index=0, return_index=0, amount=0.3)"},
    "insert_device": {"description": "Inserta un dispositivo/plugin en una pista", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "device_name", "type": "str"}], "example": "insert_device(track_index=0, device_name='EQ Eight')"},
    "configure_eq": {"description": "Configura EQ Eight en una pista", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "preset", "type": "str", "default": "default"}], "example": "configure_eq(track_index=0, preset='kick_boost')"},
    "configure_compressor": {"description": "Configura compresor en una pista", "category": "Mixing", "params": [{"name": "track_index", "type": "int"}, {"name": "preset", "type": "str", "default": "default"}, {"name": "threshold", "type": "float", "default": -20.0}, {"name": "ratio", "type": "float", "default": 4.0}], "example": "configure_compressor(track_index=1, threshold=-20.0, ratio=4.0)"},
    "setup_sidechain": {"description": "Configura compresion sidechain", "category": "Mixing", "params": [{"name": "source_track", "type": "int"}, {"name": "target_track", "type": "int"}, {"name": "amount", "type": "float", "range": "0.0-1.0"}], "example": "setup_sidechain(source_track=0, target_track=1, amount=0.5)"},
    "auto_gain_staging": {"description": "Ajusta automaticamente niveles de ganancia", "category": "Mixing", "params": [], "example": "auto_gain_staging()"},
    "apply_master_chain": {"description": "Aplica cadena de mastering al master", "category": "Mixing", "params": [{"name": "preset", "type": "str", "default": "standard"}], "example": "apply_master_chain(preset='reggaeton_streaming')"},
    # Arrangement
    "create_arrangement_audio_pattern": {"description": "Crea clips de audio en Arrangement View", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "file_path", "type": "str"}, {"name": "positions", "type": "list", "default": [0]}, {"name": "name", "type": "str", "optional": True}], "example": "create_arrangement_audio_pattern(track_index=0, file_path='...', positions=[0, 4, 8])"},
    "load_sample_to_clip": {"description": "Carga sample en clip de Session View", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "sample_path", "type": "str"}], "example": "load_sample_to_clip(track_index=0, clip_index=0, sample_path='...')"},
    "load_sample_to_drum_rack": {"description": "Carga sample en pad de Drum Rack", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "sample_path", "type": "str"}, {"name": "pad_note", "type": "int", "default": 36}], "example": "load_sample_to_drum_rack(track_index=0, sample_path='...', pad_note=36)"},
    "set_warp_markers": {"description": "Configura marcadores de warp", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "markers", "type": "list"}], "example": "set_warp_markers(track_index=0, clip_index=0, markers=[...])"},
    "reverse_clip": {"description": "Invierte un clip", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}], "example": "reverse_clip(track_index=0, clip_index=0)"},
    "pitch_shift_clip": {"description": "Cambia tono de clip (-24 a +24 semitonos)", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "semitones", "type": "float", "range": "-24 a +24"}], "example": "pitch_shift_clip(track_index=0, clip_index=0, semitones=-2)"},
    "time_stretch_clip": {"description": "Estira tiempo de clip (0.25x a 4.0x)", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "factor", "type": "float", "range": "0.25-4.0"}], "example": "time_stretch_clip(track_index=0, clip_index=0, factor=1.5)"},
    "slice_clip": {"description": "Divide clip en segmentos (2-64)", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "num_slices", "type": "int", "default": 8}], "example": "slice_clip(track_index=0, clip_index=0, num_slices=8)"},
    # Production
    "generate_track": {"description": "Genera una pista con IA", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "style", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}, {"name": "key", "type": "str", "optional": True}, {"name": "structure", "type": "str", "default": "standard"}], "example": "generate_track(genre='reggaeton', bpm=95, key='Am')"},
    "generate_song": {"description": "Genera cancion completa con IA", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "style", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}, {"name": "key", "type": "str", "optional": True}, {"name": "structure", "type": "str", "default": "standard"}], "example": "generate_song(genre='reggaeton', bpm=95, key='Am')"},
    "select_samples_for_genre": {"description": "Selecciona samples para un genero", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "key", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}], "example": "select_samples_for_genre(genre='reggaeton', key='Am', bpm=95)"},
    "generate_complete_reggaeton": {"description": "Genera proyecto completo de reggaeton", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "structure", "type": "str", "default": "verse-chorus"}, {"name": "use_samples", "type": "bool", "default": True}], "example": "generate_complete_reggaeton(bpm=95, key='Am', style='classic')"},
    "generate_from_reference": {"description": "Genera track desde audio de referencia", "category": "Production", "params": [{"name": "reference_audio_path", "type": "str"}], "example": "generate_from_reference(reference_audio_path='...')"},
    "produce_reggaeton": {"description": "Pipeline completo de produccion reggaeton", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "structure", "type": "str", "default": "verse-chorus"}], "example": "produce_reggaeton(bpm=95, key='Am', style='classic', structure='verse-chorus')"},
    "produce_from_reference": {"description": "Genera produccion desde referencia", "category": "Production", "params": [{"name": "audio_path", "type": "str"}], "example": "produce_from_reference(audio_path='...')"},
    "produce_arrangement": {"description": "Genera produccion en Arrangement View", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}], "example": "produce_arrangement(bpm=95, key='Am', style='classic')"},
    "complete_production": {"description": "Pipeline completo con renderizado", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "output_dir", "type": "str", "optional": True}], "example": "complete_production(bpm=95, key='Am', style='classic')"},
    "batch_produce": {"description": "Produce multiples canciones en lote", "category": "Production", "params": [{"name": "count", "type": "int", "default": 3}, {"name": "style", "type": "str", "default": "classic"}, {"name": "bpm_range", "type": "str", "default": "90-100"}], "example": "batch_produce(count=3, style='classic', bpm_range='90-100')"},
    "generate_midi_clip": {"description": "Crea clip MIDI con notas especificas", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "notes", "type": "list", "optional": True}], "example": "generate_midi_clip(track_index=0, clip_index=0, notes=[...])"},
    "generate_dembow_clip": {"description": "Genera clip MIDI con patron dembow", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "variation", "type": "str", "default": "standard"}], "example": "generate_dembow_clip(track_index=0, clip_index=0, bars=4, variation='standard')"},
    "generate_bass_clip": {"description": "Genera clip MIDI de bajo reggaeton", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "root_notes", "type": "list", "optional": True}, {"name": "style", "type": "str", "default": "standard"}], "example": "generate_bass_clip(track_index=1, clip_index=0, bars=4, style='standard')"},
    "generate_chords_clip": {"description": "Genera clip MIDI de acordes", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "progression", "type": "str", "default": "i-v-vi-iv"}, {"name": "key", "type": "str", "default": "Am"}], "example": "generate_chords_clip(track_index=2, clip_index=0, bars=4, progression='i-v-vi-iv', key='Am')"},
    "generate_melody_clip": {"description": "Genera clip MIDI de melodia", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int", "default": 0}, {"name": "bars", "type": "int", "default": 4}, {"name": "scale", "type": "str", "default": "minor"}, {"name": "density", "type": "str", "default": "medium"}], "example": "generate_melody_clip(track_index=3, clip_index=0, bars=4, scale='minor', density='medium')"},
    "load_samples_for_genre": {"description": "Selecciona y carga samples para genero", "category": "Production", "params": [{"name": "genre", "type": "str"}, {"name": "key", "type": "str", "optional": True}, {"name": "bpm", "type": "float", "default": 0}], "example": "load_samples_for_genre(genre='reggaeton', key='Am', bpm=95)"},
    "create_drum_kit": {"description": "Crea drum kit en Drum Rack", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "kick_path", "type": "str", "optional": True}, {"name": "snare_path", "type": "str", "optional": True}, {"name": "hat_path", "type": "str", "optional": True}, {"name": "clap_path", "type": "str", "optional": True}], "example": "create_drum_kit(track_index=0, kick_path='...', snare_path='...', hat_path='...', clap_path='...')"},
    "build_track_from_samples": {"description": "Construye pista completa desde samples", "category": "Production", "params": [{"name": "track_type", "type": "str", "default": "drums"}, {"name": "sample_role", "type": "str", "default": "drums"}], "example": "build_track_from_samples(track_type='drums', sample_role='drums')"},
    "generate_full_song": {"description": "Genera cancion completa con drums/bass/chords/melody", "category": "Production", "params": [{"name": "bpm", "type": "float", "default": 95}, {"name": "key", "type": "str", "default": "Am"}, {"name": "style", "type": "str", "default": "classic"}, {"name": "structure", "type": "str", "default": "standard"}], "example": "generate_full_song(bpm=95, key='Am', style='classic')"},
    "generate_track_from_config": {"description": "Genera pista desde JSON config", "category": "Production", "params": [{"name": "track_config_json", "type": "str"}], "example": "generate_track_from_config(track_config_json='{\"type\":\"drums\",\"pattern\":\"dembow\",\"bars\":8}')"},
    "generate_section": {"description": "Genera seccion de cancion desde JSON", "category": "Production", "params": [{"name": "section_config_json", "type": "str"}, {"name": "start_bar", "type": "int", "default": 0}], "example": "generate_section(section_config_json='{\"type\":\"verse\",\"bars\":16,\"elements\":[\"drums\",\"bass\"]}', start_bar=0)"},
    "apply_human_feel": {"description": "Humaniza pista MIDI (0.0-1.0)", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "intensity", "type": "float", "range": "0.0-1.0"}], "example": "apply_human_feel(track_index=0, intensity=0.3)"},
    "add_percussion_fills": {"description": "Aniade fills de percusion", "category": "Production", "params": [{"name": "track_index", "type": "int"}, {"name": "positions", "type": "list", "default": [7, 15, 23, 31]}], "example": "add_percussion_fills(track_index=0, positions=[7, 15, 23, 31])"},
    # Musical Intelligence
    "analyze_project_key": {"description": "Detecta tonalidad del proyecto", "category": "Musical Intelligence", "params": [], "example": "analyze_project_key()"},
    "harmonize_track": {"description": "Armoniza pista con progresion", "category": "Musical Intelligence", "params": [{"name": "track_index", "type": "int"}, {"name": "progression", "type": "str", "default": "I-V-vi-IV"}], "example": "harmonize_track(track_index=2, progression='I-V-vi-IV')"},
    "generate_counter_melody": {"description": "Genera contra-melodia", "category": "Musical Intelligence", "params": [{"name": "main_melody_track", "type": "int"}], "example": "generate_counter_melody(main_melody_track=3)"},
    "detect_energy_curve": {"description": "Analiza curva de energia por seccion", "category": "Musical Intelligence", "params": [], "example": "detect_energy_curve()"},
    "balance_sections": {"description": "Ajusta energia entre secciones", "category": "Musical Intelligence", "params": [], "example": "balance_sections()"},
    "variate_loop": {"description": "Crea variaciones de loop (0.0-1.0)", "category": "Musical Intelligence", "params": [{"name": "track_index", "type": "int"}, {"name": "intensity", "type": "float", "range": "0.0-1.0"}], "example": "variate_loop(track_index=0, intensity=0.5)"},
    "add_call_and_response": {"description": "Genera respuesta musical a frase", "category": "Musical Intelligence", "params": [{"name": "phrase_track", "type": "int"}, {"name": "response_length", "type": "int", "default": 2}], "example": "add_call_and_response(phrase_track=3, response_length=2)"},
    "generate_breakdown": {"description": "Genera seccion breakdown", "category": "Musical Intelligence", "params": [{"name": "start_bar", "type": "int"}, {"name": "duration", "type": "int", "default": 8}], "example": "generate_breakdown(start_bar=32, duration=8)"},
    "generate_drop_variation": {"description": "Genera variacion de drop", "category": "Musical Intelligence", "params": [{"name": "original_drop_bar", "type": "int"}, {"name": "variation_type", "type": "str", "default": "intense"}], "example": "generate_drop_variation(original_drop_bar=16, variation_type='intense')"},
    "create_outro": {"description": "Crea outro con fade out", "category": "Musical Intelligence", "params": [{"name": "fade_duration", "type": "int", "default": 8}], "example": "create_outro(fade_duration=8)"},
    # Workflow
    "export_project": {"description": "Exporta proyecto a archivo de audio", "category": "Workflow", "params": [{"name": "path", "type": "str"}, {"name": "format", "type": "str", "default": "wav"}], "example": "export_project(path='C:\\\\output.wav', format='wav')"},
    "get_project_summary": {"description": "Obtiene resumen del proyecto", "category": "Workflow", "params": [], "example": "get_project_summary()"},
    "suggest_improvements": {"description": "Sugerencias IA para mejorar proyecto", "category": "Workflow", "params": [], "example": "suggest_improvements()"},
    "validate_project": {"description": "Valida consistencia del proyecto", "category": "Workflow", "params": [], "example": "validate_project()"},
    "humanize_track": {"description": "Humaniza pista MIDI (0.0-1.0)", "category": "Workflow", "params": [{"name": "track_index", "type": "int"}, {"name": "intensity", "type": "float", "range": "0.0-1.0"}], "example": "humanize_track(track_index=0, intensity=0.5)"},
    "load_preset": {"description": "Carga preset en proyecto", "category": "Workflow", "params": [{"name": "preset_name", "type": "str"}], "example": "load_preset(preset_name='reggaeton_basic')"},
    "save_as_preset": {"description": "Guarda proyecto como preset", "category": "Workflow", "params": [{"name": "name", "type": "str"}, {"name": "description", "type": "str", "optional": True}], "example": "save_as_preset(name='mi_preset', description='Mi template de reggaeton')"},
    "list_presets": {"description": "Lista presets disponibles", "category": "Workflow", "params": [], "example": "list_presets()"},
    "create_custom_preset": {"description": "Crea preset personalizado", "category": "Workflow", "params": [{"name": "name", "type": "str"}, {"name": "description", "type": "str", "optional": True}], "example": "create_custom_preset(name='nuevo_preset', description='...')"},
    "render_stems": {"description": "Renderiza stems individuales", "category": "Workflow", "params": [{"name": "output_dir", "type": "str"}], "example": "render_stems(output_dir='C:\\\\stems\\\\')"},
    "render_full_mix": {"description": "Renderiza mix completo masterizado", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "render_full_mix(output_path='C:\\\\mix_final.wav')"},
    "render_instrumental": {"description": "Renderiza version instrumental", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "render_instrumental(output_path='C:\\\\instrumental.wav')"},
    "full_quality_check": {"description": "Verificacion de calidad completa", "category": "Workflow", "params": [], "example": "full_quality_check()"},
    "fix_quality_issues": {"description": "Arregla problemas de calidad", "category": "Workflow", "params": [{"name": "issues", "type": "list", "optional": True}], "example": "fix_quality_issues(issues=[])"},
    "duplicate_project": {"description": "Duplica proyecto con nuevo nombre", "category": "Workflow", "params": [{"name": "new_name", "type": "str"}], "example": "duplicate_project(new_name='mi_track_v2')"},
    "create_radio_edit": {"description": "Crea version radio edit", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "create_radio_edit(output_path='C:\\\\radio_edit.wav')"},
    "create_dj_edit": {"description": "Crea version DJ edit", "category": "Workflow", "params": [{"name": "output_path", "type": "str"}], "example": "create_dj_edit(output_path='C:\\\\dj_edit.wav')"},
    "get_production_report": {"description": "Genera reporte completo de produccion", "category": "Workflow", "params": [], "example": "get_production_report()"},
    # Diagnostics
    "get_memory_usage": {"description": "Uso de memoria del sistema", "category": "Diagnostics", "params": [], "example": "get_memory_usage()"},
    "get_progress_report": {"description": "Reporte de progreso del proyecto", "category": "Diagnostics", "params": [], "example": "get_progress_report()"},
    # System
    "ping": {"description": "Ping simple para verificar conectividad MCP", "category": "System", "params": [], "example": "ping()"},
    "help": {"description": "Lista todas las tools o ayuda detallada de una tool", "category": "System", "params": [{"name": "tool_name", "type": "str", "optional": True}], "example": "help() o help(tool_name='produce_reggaeton')"},
    "get_workflow_status": {"description": "Estado actual del workflow de produccion", "category": "System", "params": [], "example": "get_workflow_status()"},
    "undo": {"description": "Deshace ultima accion", "category": "System", "params": [], "example": "undo()"},
    "redo": {"description": "Rehace ultima accion deshecha", "category": "System", "params": [], "example": "redo()"},
    "save_checkpoint": {"description": "Guarda checkpoint del proyecto", "category": "System", "params": [{"name": "name", "type": "str", "default": "auto"}], "example": "save_checkpoint(name='antes_mejora')"},
    "set_multiple_progressions": {"description": "Configura progresiones para multiples secciones", "category": "System", "params": [{"name": "progressions_config", "type": "list"}], "example": "set_multiple_progressions(progressions_config=[...])"},
    "modulate_key": {"description": "Modula a nueva tonalidad en seccion", "category": "System", "params": [{"name": "section_index", "type": "int"}, {"name": "new_key", "type": "str"}], "example": "modulate_key(section_index=2, new_key='Dm')"},
    "enable_parallel_processing": {"description": "Activa/desactiva procesamiento paralelo", "category": "System", "params": [{"name": "enabled", "type": "bool", "default": True}], "example": "enable_parallel_processing(enabled=True)"},
}


@mcp.tool()
def help(ctx: Context, tool_name: str = "") -> str:
    """Lists all available tools, or detailed help for a specific tool (T096).

    Args:
        tool_name: Tool name for detailed help (optional). If empty, lists all.
    """
    # NOTE: intentionally shadows the builtin help() -- the MCP tool must be
    # exposed under exactly this name.
    tools_db = _TOOLS_DB

    # With a tool_name: return detailed help for the matching tool(s).
    if tool_name:
        tool_name_lower = tool_name.lower()
        # Exact (case-insensitive) match first.
        matches = {k: v for k, v in tools_db.items() if k.lower() == tool_name_lower}
        if not matches:
            # Fuzzy match: substring search over tool names.
            matches = {k: v for k, v in tools_db.items() if tool_name_lower in k.lower()}
        if not matches:
            return _err(f"Tool '{tool_name}' not found. Use help() without arguments to see all tools.")
        results = []
        for name, info in matches.items():
            # Render params as "name (optional): type, ..." for readability.
            params_str = ", ".join(
                p["name"] + (" (optional)" if p.get("optional") else "") + ": " + p["type"]
                for p in info.get("params", [])
            )
            results.append({
                "name": name,
                "description": info["description"],
                "category": info["category"],
                "parameters": params_str if params_str else "None",
                "example": info["example"],
            })
        # Single match is returned unwrapped for convenience.
        return _ok({"tool_help": results[0] if len(results) == 1 else results})

    # Without a tool_name: list every tool grouped by category.
    by_category = {}
    for name, info in tools_db.items():
        by_category.setdefault(info["category"], []).append(
            {"name": name, "description": info["description"]}
        )

    return _ok({
        "total_tools": len(tools_db),
        "categories": sorted(by_category.keys()),
        "tools_by_category": by_category,
        "usage": "Use help(tool_name='toolname') for detailed help on a specific tool.",
    })
|
||
|
||
|
||
@mcp.tool()
def get_workflow_status(ctx: Context) -> str:
    """Report the current production workflow state with actionable next steps (T100).

    Returns:
        - Current project state (tracks, clips, scenes)
        - Mixing configuration status
        - Arrangement content status
        - Recommended next steps
    """
    try:
        # --- Session snapshot ------------------------------------------
        session_info = {}
        session_resp = _send_to_ableton("get_session_info", timeout=TIMEOUTS["get_session_info"])
        if session_resp.get("status") == "success":
            raw = session_resp.get("result", {})
            session_info = {
                "tempo": raw.get("tempo"),
                "num_tracks": raw.get("num_tracks", 0),
                "num_scenes": raw.get("num_scenes", 0),
                "is_playing": raw.get("is_playing", False),
                "current_song_time": raw.get("current_song_time", 0),
            }

        # --- Track detail ----------------------------------------------
        track_summary = {}
        mixing_ready = False
        arrangement_ready = False
        tracks_resp = _send_to_ableton("get_tracks", timeout=TIMEOUTS["get_tracks"])
        if tracks_resp.get("status") == "success":
            tracks_result = _ableton_result(tracks_resp)
            all_tracks = tracks_result.get("tracks", [])
            track_summary = {
                "count": len(all_tracks),
                "midi_tracks": len([t for t in all_tracks if t.get("type") == "midi"]),
                "audio_tracks": len([t for t in all_tracks if t.get("type") == "audio"]),
                "track_names": [t.get("name", "") for t in all_tracks],
                "muted": [t.get("name", "") for t in all_tracks if t.get("mute")],
                "soloed": [t.get("name", "") for t in all_tracks if t.get("solo")],
            }
            # Mixing counts as "configured" when return tracks exist or any
            # track carries devices.
            return_tracks = tracks_result.get("return_tracks", [])
            mixing_ready = len(return_tracks) > 0 or any(t.get("devices") for t in all_tracks)
            # Arrangement has content when any track reports arrangement clips.
            arrangement_ready = any(t.get("arrangement_clips", 0) > 0 for t in all_tracks)

        # --- Recommend next steps based on state -----------------------
        next_steps = []
        track_count = session_info.get("num_tracks", 0)
        if track_count == 0:
            next_steps.append("1. Crear pistas: create_midi_track() o create_audio_track()")
            next_steps.append("2. Generar contenido: produce_reggaeton(bpm=95, key='Am', style='classic')")
        elif not arrangement_ready:
            next_steps.append("1. Generar clips en pistas: generate_dembow_clip(), generate_bass_clip(), etc.")
            next_steps.append("2. O usar pipeline automatico: produce_reggaeton(bpm=95, key='Am')")
            next_steps.append("3. O construir arrangement: produce_arrangement(bpm=95, key='Am')")

        if track_count > 0 and not mixing_ready:
            next_steps.append("Configurar mezcla: create_bus_track(), configure_eq(), configure_compressor(), setup_sidechain()")

        if track_count > 0 and arrangement_ready:
            next_steps.append("Verificar calidad: full_quality_check()")
            next_steps.append("Humanizar: apply_human_feel(track_index=0, intensity=0.3)")
            next_steps.append("Exportar: render_stems(output_dir='...'), render_full_mix(output_path='...')")

        if not next_steps:
            next_steps.append("Ejecutar health_check() para verificar estado del sistema")
            next_steps.append("Usar produce_reggaeton() para iniciar produccion rapida")

        return _ok({
            "project_status": {
                "tempo": session_info.get("tempo"),
                "tracks": track_summary,
                "num_scenes": session_info.get("num_scenes", 0),
                "is_playing": session_info.get("is_playing", False),
            },
            "mixing_configured": mixing_ready,
            "arrangement_has_content": arrangement_ready,
            "next_steps": next_steps,
        })
    except Exception as exc:
        return _err(f"Error getting workflow status: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def undo(ctx: Context) -> str:
    """Undo the most recent action in Ableton Live (T098)."""
    command = "undo"
    return _proxy_ableton_command(command, timeout=TIMEOUTS[command])
|
||
|
||
|
||
@mcp.tool()
def redo(ctx: Context) -> str:
    """Redo the most recently undone action in Ableton Live (T098)."""
    command = "redo"
    return _proxy_ableton_command(command, timeout=TIMEOUTS[command])
|
||
|
||
|
||
@mcp.tool()
def save_checkpoint(ctx: Context, name: str = "auto") -> str:
    """Save a checkpoint of the current project (T099).

    Args:
        name: Checkpoint name
    """
    payload = {"name": name}
    return _proxy_ableton_command(
        "save_checkpoint",
        payload,
        timeout=TIMEOUTS["save_checkpoint"],
        defaults=dict(payload),
    )
|
||
|
||
|
||
@mcp.tool()
def get_production_report(ctx: Context) -> str:
    """Generate a complete production report for the current project (T100)."""
    try:
        from engines.workflow_engine import WorkflowEngine

        report = WorkflowEngine().get_production_report()
        summary = {
            "project_name": report.get("project_name", "Untitled"),
            "duration": report.get("duration", "0:00"),
            "total_tracks": report.get("total_tracks", 0),
            "midi_clips": report.get("midi_clips", 0),
            "audio_clips": report.get("audio_clips", 0),
            "devices_used": report.get("devices", []),
            "samples_used": report.get("samples", []),
            "production_time": report.get("production_time", "unknown"),
            "export_history": report.get("exports", []),
            "quality_score": report.get("quality_score", 0),
        }
        return _ok(summary)
    except Exception as exc:
        return _err(f"Error getting production report: {str(exc)}")
|
||
|
||
|
||
# ==================================================================
|
||
# EXTRAS (T086-T095)
|
||
# ==================================================================
|
||
|
||
@mcp.tool()
def set_multiple_progressions(ctx: Context, progressions_config: list) -> str:
    """Configure chord progressions for multiple song sections (T086).

    Args:
        progressions_config: List of dicts like {"section": "intro", "progression": "I-V-vi-IV"}
    """
    try:
        from engines.musical_intelligence import MusicalIntelligenceEngine

        outcome = MusicalIntelligenceEngine().set_multiple_progressions(progressions_config)
        return _ok({
            "sections_configured": outcome.get("sections", []),
            "progressions_applied": outcome.get("progressions", []),
            "chords_generated": outcome.get("total_chords", 0),
        })
    except Exception as exc:
        return _err(f"Error setting progressions: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def modulate_key(ctx: Context, section_index: int, new_key: str) -> str:
    """Modulate to a new key in a specific section (T087).

    Args:
        section_index: Index of the section
        new_key: New key/tonality (e.g. "Dm", "F#m", "C")
    """
    try:
        from engines.musical_intelligence import MusicalIntelligenceEngine

        outcome = MusicalIntelligenceEngine().modulate_key(section_index, new_key)
        return _ok({
            "section_index": section_index,
            "original_key": outcome.get("original_key"),
            "new_key": new_key,
            "modulation_type": outcome.get("modulation_type", "direct"),
            "tracks_affected": outcome.get("tracks_affected", []),
        })
    except Exception as exc:
        return _err(f"Error modulating key: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def enable_parallel_processing(ctx: Context, enabled: bool = True) -> str:
    """Enable/disable parallel processing for heavy operations (T092).

    Args:
        enabled: True to enable, False to disable
    """
    try:
        from engines.workflow_engine import WorkflowEngine

        outcome = WorkflowEngine().set_parallel_processing(enabled)
        return _ok({
            "parallel_processing": enabled,
            "max_workers": outcome.get("max_workers", 4),
            "affected_operations": outcome.get("operations", ["render", "analyze", "generate"]),
        })
    except Exception as exc:
        return _err(f"Error setting parallel processing: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def get_memory_usage(ctx: Context) -> str:
    """Report memory usage for this process, the system, and running Live instances (T094).

    Returns process RSS (MB), process memory percentage, system totals,
    and the number of running processes whose name contains "ableton".
    """
    try:
        import psutil

        process = psutil.Process()
        system_memory = psutil.virtual_memory()
        # Use process_iter(["name"]) so names are pre-fetched and processes
        # that exit mid-iteration are skipped. Calling p.name() directly can
        # raise psutil.NoSuchProcess / AccessDenied for short-lived or
        # protected processes and would abort the whole scan.
        live_count = 0
        for p in psutil.process_iter(["name"]):
            proc_name = p.info.get("name") or ""
            if "ableton" in proc_name.lower():
                live_count += 1
        return _ok({
            "process_memory_mb": process.memory_info().rss / 1024 / 1024,
            "process_memory_percent": process.memory_percent(),
            "system_total_mb": system_memory.total / 1024 / 1024,
            "system_available_mb": system_memory.available / 1024 / 1024,
            "system_percent_used": system_memory.percent,
            "live_processes": live_count,
        })
    except ImportError:
        return _err("psutil not available. Install with: pip install psutil")
    except Exception as e:
        return _err(f"Error getting memory usage: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def get_progress_report(ctx: Context) -> str:
    """Detailed progress report for the current project (T095)."""
    try:
        from engines.workflow_engine import WorkflowEngine

        progress = WorkflowEngine().get_progress_report()
        return _ok({
            "project_completion": progress.get("completion", 0),
            "phases_completed": progress.get("phases_completed", []),
            "current_phase": progress.get("current_phase", "unknown"),
            "tasks_done": progress.get("tasks_done", 0),
            "tasks_total": progress.get("tasks_total", 0),
            "time_invested": progress.get("time_invested", "0h 0m"),
            "milestones": progress.get("milestones", []),
        })
    except Exception as exc:
        return _err(f"Error getting progress report: {str(exc)}")
|
||
|
||
|
||
|
||
|
||
# ==================================================================
|
||
# PLAYBACK, ARRANGEMENT & LIBRARY TOOLS (core fixes)
|
||
# ==================================================================
|
||
|
||
@mcp.tool()
def fire_all_clips(ctx: Context, scene_index: int = 0, start_playback: bool = True) -> str:
    """Fire every clip in a Session View scene so the created content becomes audible.

    Call this immediately after any produce_* / generate_* command to start
    playback. Clips exist in Live but stay silent until they are fired.

    Args:
        scene_index: Which scene row to fire (default 0 = first scene)
        start_playback: Also call Start Playing on the transport (default True)
    """
    params = {"scene_index": scene_index, "start_playback": start_playback}
    return _proxy_ableton_command("fire_all_clips", params, timeout=15.0)
|
||
|
||
|
||
@mcp.tool()
def record_to_arrangement(ctx: Context, duration_bars: int = 8) -> str:
    """Record Session View clips into Arrangement View so you can see and edit them.

    Enables arrangement overdub, fires scene 0, records for `duration_bars` bars,
    then stops and switches Ableton to Arrangement View automatically.

    Args:
        duration_bars: How many bars to record (default 8)
    """
    # Allow ~4 s per bar plus a 30 s safety margin so slow tempos never time out.
    generous_timeout = duration_bars * 4.0 + 30.0
    return _proxy_ableton_command(
        "record_to_arrangement",
        {"duration_bars": duration_bars},
        timeout=generous_timeout,
    )
|
||
|
||
|
||
@mcp.tool()
def scan_library(ctx: Context, subfolder: str = "", extensions: Optional[list] = None) -> str:
    """Scan the libreria/ sample library and return all available samples categorized by folder.

    Use this to discover what samples are available before loading them.
    Returns file paths you can use with load_sample_direct.

    Args:
        subfolder: Sub-folder to scan e.g. "reggaeton/kick" (default = all)
        extensions: File extensions to include e.g. [".wav", ".mp3"] (default all audio)
    """
    # Annotation fixed: the default is None, so the parameter is Optional[list],
    # not list. The "extensions" key is only forwarded when a non-empty list
    # was supplied.
    params = {"subfolder": subfolder}
    if extensions:
        params["extensions"] = extensions
    return _proxy_ableton_command("scan_library", params, timeout=20.0)
|
||
|
||
|
||
@mcp.tool()
def load_sample_direct(ctx: Context, track_index: int, file_path: str,
                       slot_index: int = 0, warp: bool = True,
                       auto_fire: bool = False) -> str:
    """Load a sample from libreria/ directly onto a track by absolute file path.

    The most reliable way to use the sample library — it bypasses the Live
    browser entirely. Works with any WAV, AIF, or MP3 file.

    Args:
        track_index: Track index in Ableton (0-based)
        file_path: Absolute path OR path relative to libreria/ root
        slot_index: Clip slot index (default 0)
        warp: Enable warping/tempo-sync (default True)
        auto_fire: Fire the clip immediately after loading (default False)
    """
    payload = {
        "track_index": track_index,
        "file_path": file_path,
        "slot_index": slot_index,
        "warp": warp,
        "auto_fire": auto_fire,
    }
    return _proxy_ableton_command("load_sample_direct", payload, timeout=20.0)
|
||
|
||
|
||
@mcp.tool()
def produce_with_library(ctx: Context, genre: str = "reggaeton", tempo: int = 95,
                         key: str = "Am", bars: int = 16,
                         auto_play: bool = True,
                         record_arrangement: bool = True) -> str:
    """Complete one-shot music production using your real 511-sample library (Session View).

    DEPRECATED: Consider using build_arrangement_timeline() for direct Arrangement View creation.

    This tool works in Session View, Ableton's clip-launching paradigm. For
    direct timeline-based composition, use build_arrangement_timeline() instead.

    Steps performed:
        1. Sets project tempo
        2. Loads real drum samples (kick, snare, clap, hihat) from libreria/
        3. Loads bass samples from libreria/
        4. Generates a MIDI dembow drum pattern
        5. Generates a MIDI bass line
        6. Generates a chord progression
        7. Records to Arrangement View (if record_arrangement=True)
        8. Fires all clips so you hear the result immediately

    MIGRATION GUIDE:
        - OLD (Session View): produce_with_library() → Session clips, optionally recorded
        - NEW (Arrangement):  build_arrangement_timeline() → Direct timeline placement
          with precise bar positioning

    Args:
        genre: Genre for sample selection, e.g. "reggaeton" (default "reggaeton")
        tempo: BPM (default 95)
        key: Musical key e.g. "Am", "Cm", "Gm" (default "Am")
        bars: Pattern length in bars (default 16)
        auto_play: Start playback immediately after building (default True)
        record_arrangement: Also record to Arrangement View (default True — changed from False)
    """
    payload = {
        "genre": genre,
        "tempo": tempo,
        "key": key,
        "bars": bars,
        "auto_play": auto_play,
        "record_arrangement": record_arrangement,
    }
    return _proxy_ableton_command("produce_with_library", payload, timeout=120.0)
|
||
|
||
|
||
@mcp.tool()
def build_song(ctx: Context,
               genre: str = "reggaeton",
               tempo: int = 95,
               key: str = "Am",
               style: str = "standard",
               auto_record: bool = True) -> str:
    """Build a complete, intelligent song arrangement in Ableton Arrangement View.

    *** USE THIS TOOL TO CREATE MUSIC — it's the definitive production command. ***

    Performed automatically:
        - Scans the libreria/ sample library (511 samples)
        - Creates Kick, Snare, HiHat, Perc, Bass audio tracks with REAL samples
        - Creates Dembow, Bass MIDI, Chords, Melody MIDI tracks with generated patterns
        - Builds 5 song sections (Intro/Verse/Chorus/Bridge/Outro), each with
          different clip variations (sparse intro, full chorus with melody, etc.)
        - Records all sections to Arrangement View, section by section
        - Switches Ableton to Arrangement View when done

    Approximate recording time:
        4+8+8+4+4 = 28 bars × (60/tempo × 4) seconds per bar
        → at 95 BPM roughly 70 seconds total. Clips appear in the Arrangement
        as they are recorded.

    Args:
        genre: "reggaeton" (default) — which library folder to use for samples
        tempo: Song BPM (default 95)
        key: Musical key e.g. "Am", "Cm", "Gm" (default "Am")
        style: Pattern style — "standard", "minimal", or "trap" (default "standard")
        auto_record: Record to Arrangement View automatically (default True)
    """
    payload = {
        "genre": genre,
        "tempo": tempo,
        "key": key,
        "style": style,
        "auto_record": auto_record,
    }
    # 5 minutes — enough headroom for a 28-bar recording at any tempo.
    return _proxy_ableton_command("build_song", payload, timeout=300.0)
|
||
|
||
|
||
@mcp.tool()
def get_recording_status(ctx: Context) -> str:
    """Check the progress of an in-progress arrangement recording.

    Poll this while build_song or record_to_arrangement is running.
    Returns the current section name, phase, and seconds remaining in this section.
    """
    empty_params = {}
    return _proxy_ableton_command("get_recording_status", empty_params, timeout=5.0)
|
||
|
||
|
||
@mcp.tool()
def stop_recording(ctx: Context) -> str:
    """Stop any in-progress arrangement recording immediately.

    Disables overdub, stops playback, and switches to Arrangement View.
    Use this to abort a build_song recording.
    """
    # Backed by the Remote Script's "stop_all_playback" command.
    return _proxy_ableton_command("stop_all_playback", {}, timeout=10.0)
|
||
|
||
|
||
# ==================================================================
|
||
# ARRANGEMENT-FIRST TOOLS (Direct timeline composition)
|
||
# ==================================================================
|
||
# These tools bypass Session View and create content directly in
|
||
# Arrangement View for timeline-based music production.
|
||
|
||
@mcp.tool()
def build_arrangement_timeline(ctx: Context,
                               sections_json: str,
                               genre: str = "reggaeton",
                               tempo: int = 95,
                               key: str = "Am",
                               style: str = "standard") -> str:
    """Build a complete song directly in Arrangement View.

    *** PREFERRED TOOL FOR TIMELINE-BASED COMPOSITION ***

    This is the ARRANGEMENT-FIRST alternative to produce_with_library().
    Instead of creating clips in Session View first, this tool places
    content directly on the Arrangement timeline at specified bar positions.

    MIGRATION GUIDE from Session View workflow:
    - OLD: produce_with_library() → Session View clips → record to arrangement
    - NEW: build_arrangement_timeline() → Direct Arrangement View placement

    sections_json format example:
    [
        {
            "name": "Intro",
            "start_bar": 0,
            "duration_bars": 4,
            "tracks": [
                {"type": "drums", "variation": "minimal"},
                {"type": "bass", "variation": "sparse"}
            ]
        },
        {
            "name": "Verse",
            "start_bar": 4,
            "duration_bars": 16,
            "tracks": [
                {"type": "drums", "variation": "full"},
                {"type": "bass", "variation": "standard"},
                {"type": "chords", "variation": "i-v-vi-iv"}
            ]
        },
        {
            "name": "Chorus",
            "start_bar": 20,
            "duration_bars": 8,
            "tracks": [
                {"type": "drums", "variation": "full"},
                {"type": "bass", "variation": "melodic"},
                {"type": "chords", "variation": "i-v-vi-iv"},
                {"type": "melody", "variation": "lead"}
            ]
        }
    ]

    Track types: drums, bass, chords, melody, fx, perc
    Variations:
    - drums: minimal, standard, full, fill
    - bass: sparse, standard, melodic, staccato
    - chords: i-v-vi-iv, i-iv-v, i-vi-iv-v
    - melody: sparse, medium, dense, lead

    Args:
        sections_json: JSON string defining song sections with bar positions
        genre: Genre for sample selection (default "reggaeton")
        tempo: BPM (default 95)
        key: Musical key e.g. "Am", "Cm", "Gm" (default "Am")
        style: Pattern style — "standard", "minimal", "trap" (default "standard")

    Returns:
        JSON with arrangement summary including section positions and tracks created.
    """
    try:
        import json as json_lib
        sections = json_lib.loads(sections_json)

        # Validate sections
        if not isinstance(sections, list) or len(sections) == 0:
            return _err("sections_json must be a non-empty list of section objects")

        created_tracks = []
        created_sections = []

        # Create tracks first: collect the distinct track types used anywhere
        # in the song so each type gets exactly one Arrangement track.
        track_types = set()
        for section in sections:
            for track in section.get("tracks", []):
                track_types.add(track.get("type", "drums"))

        # Create each track in Arrangement View.
        # NOTE(review): a failed track creation is skipped silently here;
        # sections referencing that type then get no content — confirm this
        # best-effort behavior is intended.
        for track_type in track_types:
            track_result = _send_to_ableton(
                "create_arrangement_track",
                {"track_type": track_type, "name": f"{track_type.title()} Arr"},
                timeout=15.0
            )
            if track_result.get("status") == "success":
                created_tracks.append({
                    "type": track_type,
                    # -1 signals a missing index in the Remote Script response.
                    "index": track_result.get("result", {}).get("track_index", -1)
                })

        # Create sections at their bar positions
        for section in sections:
            section_name = section.get("name", "Section")
            start_bar = section.get("start_bar", 0)
            duration = section.get("duration_bars", 8)

            section_tracks = []
            for track_def in section.get("tracks", []):
                track_type = track_def.get("type", "drums")
                variation = track_def.get("variation", "standard")

                # Find the track index for this type (first match wins).
                track_index = None
                for t in created_tracks:
                    if t["type"] == track_type:
                        track_index = t["index"]
                        break

                if track_index is not None:
                    # Create section content on the matched track.
                    # NOTE(review): 'variation' is echoed in the response below
                    # but not forwarded to the Remote Script command — confirm
                    # whether the handler derives it from section_type instead.
                    resp = _send_to_ableton(
                        "create_section_at_bar",
                        {
                            "track_index": track_index,
                            "section_type": section_name.lower(),
                            "at_bar": start_bar,
                            "duration_bars": duration,
                            "key": key
                        },
                        timeout=30.0
                    )
                    if resp.get("status") == "success":
                        section_tracks.append({
                            "type": track_type,
                            "variation": variation,
                            "track_index": track_index
                        })

            created_sections.append({
                "name": section_name,
                "start_bar": start_bar,
                "duration_bars": duration,
                "tracks": section_tracks
            })

        return _ok({
            "arrangement_type": "timeline_direct",
            "genre": genre,
            "tempo": tempo,
            "key": key,
            "style": style,
            "tracks_created": len(created_tracks),
            "sections_created": len(created_sections),
            "section_details": created_sections,
            "view": "Arrangement",
            "note": "Content created directly in Arrangement View (not Session View)"
        })

    except json_lib.JSONDecodeError as e:
        return _err(f"Invalid JSON in sections_json: {str(e)}")
    except Exception as e:
        logger.exception("build_arrangement_timeline: failed")
        return _err(f"Error building arrangement timeline: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def create_section_at_bar(ctx: Context,
                          track_index: int,
                          section_type: str,
                          at_bar: float,
                          duration_bars: float = 8,
                          key: str = "Am") -> str:
    """Create a song section (intro/verse/chorus/bridge/outro) at a specific bar position.

    Places content directly in Arrangement View at the given bar — a building
    block for timeline-based composition.

    Section types and their characteristics:
        - intro: Sparse arrangement, minimal drums, building elements
        - verse: Full drums, bass, chords; moderate energy
        - chorus: Full arrangement with melody, highest energy
        - bridge: Different progression, transitional energy
        - outro: Fading elements, breakdown
        - build: Rising energy, preparing for drop
        - drop: Maximum impact, all elements

    Args:
        track_index: Index of the target track
        section_type: Type of section — intro, verse, chorus, bridge, outro, build, drop
        at_bar: Starting bar position in the arrangement
        duration_bars: Length of the section in bars (default 8)
        key: Musical key for harmonic content (default "Am")

    Returns:
        JSON with section creation status and clip details.
    """
    # Per-section content-generation presets; unknown types fall back to "verse".
    section_configs = {
        "intro": {"density": "sparse", "variation": "minimal"},
        "verse": {"density": "medium", "variation": "standard"},
        "chorus": {"density": "full", "variation": "full"},
        "bridge": {"density": "medium", "variation": "melodic"},
        "outro": {"density": "sparse", "variation": "fade"},
        "build": {"density": "building", "variation": "rising"},
        "drop": {"density": "maximum", "variation": "impact"},
    }

    requested = section_type.lower()
    config = section_configs.get(requested, section_configs["verse"])

    try:
        resp = _send_to_ableton(
            "create_section_at_bar",
            {
                "track_index": track_index,
                "section_type": requested,
                "at_bar": at_bar,
                "duration_bars": duration_bars,
                "key": key,
                "density": config["density"],
                "variation": config["variation"],
            },
            timeout=30.0,
        )

        if resp.get("status") != "success":
            return _err(resp.get("message", f"Failed to create {section_type} at bar {at_bar}"))

        return _ok({
            "track_index": track_index,
            "section_type": section_type,
            "at_bar": at_bar,
            "duration_bars": duration_bars,
            "key": key,
            "config": config,
            "view": "Arrangement",
            "message": f"Created {section_type} at bar {at_bar} on track {track_index}",
        })

    except Exception as exc:
        logger.exception("create_section_at_bar: failed")
        return _err(f"Error creating section: {str(exc)}")
|
||
|
||
|
||
@mcp.tool()
def create_arrangement_track(ctx: Context,
                             track_type: str,
                             name: Optional[str] = None,
                             insert_at_bar: float = 0) -> str:
    """Create a new track directly in Arrangement View.

    Creates a track specifically for timeline-based arrangement composition.
    The track is ready for clips to be placed at specific bar positions.

    Track types and their purposes:
        - drums: Drum patterns, percussive elements
        - bass: Basslines, low-frequency content
        - chords: Harmonic content, pads, rhythmic chords
        - melody: Lead lines, melodic elements
        - fx: Effects, risers, impacts, transitions
        - perc: Additional percussion layers

    Args:
        track_type: Type of track — drums, bass, chords, melody, fx, perc
        name: Optional custom name for the track (default: auto-generated from type)
        insert_at_bar: Position hint for initial track focus (default 0)

    Returns:
        JSON with track creation status and track index.
    """
    # Annotation fixed: the default is None, so the parameter is Optional[str],
    # not str.
    try:
        # Auto-generate name if not provided
        if name is None:
            name = f"{track_type.title()} Arr"

        resp = _send_to_ableton(
            "create_arrangement_track",
            {
                "track_type": track_type,
                "name": name,
                "insert_at_bar": insert_at_bar
            },
            timeout=15.0
        )

        if resp.get("status") == "success":
            result = resp.get("result", {})
            return _ok({
                # -1 signals a missing index in the Remote Script response.
                "track_index": result.get("track_index", -1),
                "track_type": track_type,
                "name": name,
                "view": "Arrangement",
                "message": f"Created {track_type} track '{name}' at index {result.get('track_index', -1)}"
            })
        return _err(resp.get("message", f"Failed to create {track_type} track"))

    except Exception as e:
        logger.exception("create_arrangement_track: failed")
        return _err(f"Error creating arrangement track: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def get_arrangement_status(ctx: Context) -> str:
    """Get detailed status of Arrangement View content.

    Reports every clip currently in the Arrangement View with its position,
    length, and track assignment. Use it to inspect the timeline state.

    Returns:
        JSON with arrangement details:
        - total_clips: Number of clips in arrangement
        - arrangement_length_beats: Total length in beats
        - unique_start_positions: Sorted clip start points (bar map)
        - clips: List of clip details with track, name, position, length
        - tracks: Summary of tracks with clip counts
    """
    try:
        resp = _send_to_ableton("get_arrangement_clips", {}, timeout=10.0)

        if resp.get("status") != "success":
            return _err(resp.get("message", "Failed to get arrangement status"))

        info = resp.get("result", {})
        clip_total = info.get("total_clips", 0)
        return _ok({
            "view": "Arrangement",
            "total_clips": clip_total,
            "arrangement_length_beats": info.get("arrangement_length_beats", 0),
            "unique_start_positions": info.get("unique_start_positions", []),
            "clips": info.get("clips", []),
            "tracks_summary": info.get("tracks_summary", {}),
            "status": "ready" if clip_total > 0 else "empty",
        })

    except Exception as exc:
        logger.exception("get_arrangement_status: failed")
        return _err(f"Error getting arrangement status: {str(exc)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# SESSION VS ARRANGEMENT MIGRATION NOTES
|
||
# ------------------------------------------------------------------
|
||
# OLD SESSION-VIEW-FIRST TOOLS (Deprecated patterns):
|
||
# - produce_with_library() → Creates Session clips, optionally records
|
||
# - produce_reggaeton() → Session View based
|
||
# - generate_*_clip() → Creates clips in Session View slots
|
||
#
|
||
# NEW ARRANGEMENT-FIRST TOOLS (Preferred):
|
||
# - build_arrangement_timeline() → Direct timeline composition
|
||
# - create_section_at_bar() → Place sections at specific bars
|
||
# - create_arrangement_track() → Create timeline-ready tracks
|
||
# - get_arrangement_status() → Inspect timeline state
|
||
# - generate_intelligent_track() → One-prompt professional track creation
|
||
#
|
||
# RECOMMENDED WORKFLOW:
|
||
# 1. Use build_arrangement_timeline() for complete songs
|
||
# 2. Use create_section_at_bar() for individual sections
|
||
# 3. Use create_arrangement_track() for custom track layouts
|
||
# 4. Use get_arrangement_status() to verify timeline content
|
||
# 5. Use generate_intelligent_track() for one-prompt music creation
|
||
# ------------------------------------------------------------------
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# INTELLIGENT TRACK GENERATION
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def generate_intelligent_track(ctx: Context,
                               description: str,
                               structure_type: str = "standard",
                               variation_level: str = "medium",
                               coherence_threshold: float = 0.90,
                               include_vocal_placeholder: bool = True,
                               surprise_mode: bool = False,
                               save_as_preset: bool = True) -> str:
    """Generate a complete professional track with intelligent sample selection.

    ONE-PROMPT MUSIC CREATION: builds a full, professional-quality track from
    a single description — sample selection, coherence validation, arrangement
    creation, and mixing are all handled automatically.

    Args:
        description: Natural language description of the desired track, e.g.
            - "reggaeton perreo intenso 95bpm Am"
            - "romantico suave 90bpm Gm con piano"
            - "trap oscuro 140bpm Cm, agresivo"
        structure_type: Song structure template — "tiktok" (30s), "short" (1min),
            "standard" (3min), "extended" (4-5min)
        variation_level: How much samples vary between sections:
            "low" = same samples throughout; "medium" = subtle variations;
            "high" = distinct but coherent variations
        coherence_threshold: Minimum professional coherence (0.0-1.0).
            Default 0.90 (professional grade). Iterates until achieved or
            fails explicitly.
        include_vocal_placeholder: Add an empty track for vocals.
        surprise_mode: If True, introduces controlled randomness for unique
            but coherent results each time.
        save_as_preset: Save the resulting kit as a reusable preset.

    Returns:
        JSON with complete track info, coherence scores, rationale, and the
        preset name if saved.

    Example:
        generate_intelligent_track(
            description="reggaeton perreo intenso 95bpm Am",
            structure_type="standard",
            variation_level="high",
            coherence_threshold=0.90
        )
    """
    payload = {
        "description": description,
        "structure_type": structure_type,
        "variation_level": variation_level,
        "coherence_threshold": coherence_threshold,
        "include_vocal_placeholder": include_vocal_placeholder,
        "surprise_mode": surprise_mode,
        "save_as_preset": save_as_preset,
    }
    # 5 minutes: full track generation can be slow.
    return _proxy_ableton_command(
        "generate_intelligent_track",
        payload,
        timeout=300.0,
        defaults={
            "description": description,
            "structure_type": structure_type,
        },
    )
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# ARRANGEMENT INJECTION TOOLS
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def create_arrangement_audio_pattern(ctx: Context, track_index: int, file_path: str,
                                     positions: str, name: str = "") -> str:
    '''Create audio clips in Arrangement View directly from file.

    Args:
        track_index: Target track index
        file_path: Absolute path to audio file
        positions: JSON list of beat positions (e.g., "[0.0, 16.0, 32.0]")
        name: Optional clip name

    Returns:
        JSON with created clip info (one clip per position on success),
        or a JSON error message on invalid input / Remote Script failure.
    '''
    try:
        # NOTE: `json` is imported at module level; the previous function-local
        # `import json` was redundant and shadowed the top-level module.
        pos_list = json.loads(positions)
        if not isinstance(pos_list, list):
            return _err("positions must be a JSON list of beat positions")

        # Delegate clip creation to the Ableton Remote Script over TCP.
        resp = _send_to_ableton(
            "create_arrangement_audio_pattern",
            {"track_index": track_index, "file_path": file_path,
             "positions": pos_list, "name": name},
            timeout=TIMEOUTS["create_arrangement_audio_pattern"]
        )

        if resp.get("status") == "success":
            return _ok({
                "track_index": track_index,
                "file_path": file_path,
                "positions": pos_list,
                "clips_created": len(pos_list),
                "name": name,
                "view": "Arrangement",
            })
        return _err(resp.get("message", "Failed to create arrangement audio pattern"))
    except json.JSONDecodeError:
        return _err("Invalid JSON in positions parameter. Expected format: '[0.0, 16.0, 32.0]'")
    except Exception as e:
        return _err(f"Error creating arrangement audio pattern: {str(e)}")
|
||
|
||
|
||
@mcp.tool()
def create_arrangement_midi_clip(ctx: Context, track_index: int, start_time: float,
                                 length: float, notes: str) -> str:
    '''Create MIDI clip in Arrangement View.

    Args:
        track_index: Target track index
        start_time: Start position in beats
        length: Clip length in beats
        notes: JSON list of note dicts [{"pitch": 60, "start": 0.0, "duration": 0.5, "velocity": 100}]

    Returns:
        JSON with created clip info on success, or a JSON error message
        on invalid input / Remote Script failure.
    '''
    try:
        # NOTE: `json` is imported at module level; the previous function-local
        # `import json` was redundant and shadowed the top-level module.
        notes_list = json.loads(notes)
        if not isinstance(notes_list, list):
            return _err("notes must be a JSON list of note dictionaries")

        # Delegate clip creation to the Ableton Remote Script over TCP.
        resp = _send_to_ableton(
            "create_arrangement_midi_clip",
            {"track_index": track_index, "start_time": start_time,
             "length": length, "notes": notes_list},
            timeout=TIMEOUTS["create_arrangement_midi_clip"]
        )

        if resp.get("status") == "success":
            return _ok({
                "track_index": track_index,
                "start_time": start_time,
                "length": length,
                "notes_added": len(notes_list),
                "view": "Arrangement",
            })
        return _err(resp.get("message", "Failed to create arrangement MIDI clip"))
    except json.JSONDecodeError:
        return _err('Invalid JSON in notes parameter. Expected format: \'[{"pitch": 60, "start": 0.0, "duration": 0.5, "velocity": 100}]\'')
    except Exception as e:
        return _err(f"Error creating arrangement MIDI clip: {str(e)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# AUDIO ANALYSIS TOOLS
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def analyze_audio_file(ctx: Context, file_path: str) -> str:
    '''Analyze audio file and extract features (BPM, key, spectral).

    Args:
        file_path: Absolute path to audio file

    Returns:
        JSON with AudioFeatures (bpm, key, duration, spectral features, etc.)
    '''
    try:
        if not os.path.isfile(file_path):
            return _err(f"Audio file not found: {file_path}")

        from engines.audio_analyzer_dual import AudioAnalyzerDual

        features = AudioAnalyzerDual(backend="auto").analyze_sample(file_path)

        # Flatten the AudioFeatures dataclass into a plain JSON-serializable
        # dict; insertion order (file_path first) is preserved in the output.
        feature_attrs = (
            "bpm", "key", "duration",
            "spectral_centroid", "spectral_rolloff", "zero_crossing_rate",
            "rms_energy", "key_confidence", "sample_type",
            "is_harmonic", "is_percussive", "suggested_genres",
        )
        payload = {"file_path": file_path}
        for attr in feature_attrs:
            payload[attr] = getattr(features, attr)

        return _ok(payload)
    except ImportError:
        return _err("Audio analyzer engine not available.")
    except Exception as e:
        return _err(f"Error analyzing audio file: {str(e)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# DIVERSITY & COHERENCE TOOLS
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def reset_diversity_memory(ctx: Context) -> str:
    '''Reset cross-generation diversity memory for fresh session.

    Returns:
        Confirmation message
    '''
    try:
        from engines.coherence_system import reset_all_memory

        reset_all_memory()

        confirmation = {
            "status": "success",
            "message": "Diversity memory reset successfully. All generation history cleared.",
        }
        return _ok(confirmation)
    except ImportError:
        return _err("Coherence system not available.")
    except Exception as exc:
        return _err(f"Error resetting diversity memory: {exc}")
|
||
|
||
|
||
@mcp.tool()
def get_sample_fatigue_report(ctx: Context) -> str:
    '''Get sample usage fatigue report.

    Returns:
        JSON with most used samples by role
    '''
    try:
        from engines.coherence_system import get_coherence_memory_stats

        # The coherence engine tracks per-role sample usage across generations.
        usage_stats = get_coherence_memory_stats()

        return _ok({"status": "success", "report": usage_stats})
    except ImportError:
        return _err("Coherence system not available.")
    except Exception as exc:
        return _err(f"Error getting sample fatigue report: {exc}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# PROFESSIONAL MIXING TOOLS
|
||
# ------------------------------------------------------------------
|
||
|
||
@mcp.tool()
def apply_professional_mix(ctx: Context, track_assignments: str) -> str:
    '''Apply complete professional mix with buses and returns.

    Args:
        track_assignments: JSON dict mapping track indices to roles
                           (e.g., '{"0": "kick", "1": "snare", "2": "bass"}')

    Returns:
        JSON with applied mix configuration, or a JSON error message on
        invalid input / missing engines / no Ableton connection.
    '''
    try:
        # NOTE: `json` is imported at module level; the previous function-local
        # `import json` was redundant and shadowed the top-level module.
        assignments = json.loads(track_assignments)
        if not isinstance(assignments, dict):
            return _err("track_assignments must be a JSON object mapping track indices to roles")

        # Convert string keys to integers (JSON keys are always strings)
        parsed_assignments = {}
        for k, v in assignments.items():
            try:
                parsed_assignments[int(k)] = v
            except ValueError:
                return _err(f"Invalid track index: {k}. Must be an integer.")

        # Alias the engine helper so it does not shadow this tool's own name
        # inside the function body.
        from engines.bus_architecture import apply_professional_mix as _engine_apply_mix
        from engines.tcp_client import get_ableton_connection

        ableton_conn = get_ableton_connection()
        if ableton_conn is None:
            return _err("Unable to connect to Ableton Live")

        result = _engine_apply_mix(ableton_conn, parsed_assignments)

        return _ok({
            "status": "success",
            "message": "Professional mix applied successfully",
            "configuration": result,
            "tracks_processed": len(parsed_assignments),
        })
    except json.JSONDecodeError:
        return _err('Invalid JSON in track_assignments. Expected format: \'{"0": "kick", "1": "snare"}\'')
    except ImportError as e:
        return _err(f"Required engine not available: {str(e)}")
    except Exception as e:
        return _err(f"Error applying professional mix: {str(e)}")
|
||
|
||
|
||
# ------------------------------------------------------------------
|
||
# MAIN
|
||
# ------------------------------------------------------------------
|
||
if __name__ == "__main__":
    # Entry point: start the FastMCP server (blocks until shutdown).
    mcp.run()
|