diff --git a/AbletonMCP_AI/mcp_server/ai_loop.py b/AbletonMCP_AI/mcp_server/ai_loop.py index 153e70f..5e55e63 100644 --- a/AbletonMCP_AI/mcp_server/ai_loop.py +++ b/AbletonMCP_AI/mcp_server/ai_loop.py @@ -66,7 +66,7 @@ SongScore schema: "meta": { "title": "", "tempo": <85-105>, - "key": "", + "key": "", "genre": "reggaeton", "time_signature": "4/4", "gap_bars": <1.0-4.0> @@ -92,35 +92,49 @@ SongScore schema: ] } -Available sample subfolders — use EXACTLY these values in the "sample" field: - "kick/auto" -> Kick drums - "snare/auto" -> Snares - "hi-hat (para percs normalmente)/auto" -> Hi-hat / percussion - "drumloops/auto" -> Drum loops - "perc loop/auto" -> Percussion loops - "bass/auto" -> Bass samples - "fx/auto" -> FX/transitions +Available sample categories — use EXACTLY "category/auto" in the "sample" field: + "kick/auto" -> Kick drums (23 samples: main + reggaeton 3 + SentimientoLatino) + "snare/auto" -> Snares (29 samples) + "hihat/auto" -> Hi-hats (6 samples) + "drumloops/auto" -> Drum loops with BPM (70 samples, 83-160 BPM range) + "perc/auto" -> Percussion loops (21 samples) + "bass/auto" -> Bass samples (41 samples) + "fx/auto" -> FX and transitions (45 samples) + "synth/auto" -> Synth leads, plucks, arps (54 samples) + "pad/auto" -> Pads and textures (23 samples) + "keys/auto" -> Piano, rhodes, keys (13 samples) + "vocals/auto" -> Vocal chops, phrases, ad-libs (42 samples) + "oneshots/auto" -> One-shot melodic hits (63 samples) + "impact/auto" -> Impact hits (7 samples) + "fill/auto" -> Drum fills (5 samples) + "bells/auto" -> Bells and mallets (16 samples) + "chords/auto" -> Chord samples and MIDI (56 samples) + "guitar/auto" -> Guitar loops (3 samples) + "brass/auto" -> Brass hits (included in oneshots) + "music_loop/auto" -> Full music loops (7 samples) -IMPORTANT: "auto" is a keyword that means "pick the best sample automatically". -Do NOT write "subfolder/auto" literally — that is an instruction, not a valid path. 
+The system automatically picks the BEST sample matching the project BPM and key. -Available MIDI patterns: +Available MIDI patterns (use in "pattern" field for type:"midi" tracks): dembow_minimal dembow_standard dembow_double bass_sub bass_pluck bass_octaves bass_sustained chords_verse chords_chorus melody_simple -Available EQ presets: kick snare bass synth master -compression_preset is accepted but currently ignored (reserved for future use). +Available EQ presets: kick, kick_sub, kick_punch, snare, snare_body, snare_crack, + bass, bass_clean, synth, synth_air, pad_warm, master Rules: - Every track MUST have at least one clip. - Every clip MUST reference a valid section name from the structure array. - Always include at minimum: kick, snare or drum_loop, dembow, bass tracks. +- Use 6-12 tracks for a full production. Be creative with synths, pads, vocals, bells. - Vary everything: title, tempo, key, gap_bars, structure length (40-90 total bars). - Use realistic reggaeton/latin structures (Intro, Verse, Pre-Chorus, Chorus, Bridge, Outro). -- Mix audio and MIDI tracks creatively. -- Section names MUST be unique. Use numbered suffixes: "Intro", "Verse A", "Pre-Chorus", "Chorus A", "Verse B", "Chorus B", "Bridge", "Outro". NEVER repeat a section name. -- Do NOT include "start_bar" in sections. The engine calculates it automatically from duration_bars and gap_bars. +- Mix audio and MIDI tracks creatively. Use diverse sample categories. +- Section names MUST be unique. Use numbered suffixes: "Intro", "Verse A", "Pre-Chorus", + "Chorus A", "Verse B", "Chorus B", "Bridge", "Outro". NEVER repeat a section name. +- Do NOT include "start_bar" in sections. The engine calculates it automatically. +- Audio tracks use "sample" field. MIDI tracks use "pattern" field. Do NOT mix them. - Output ONLY the JSON object. Nothing else. 
""" diff --git a/AbletonMCP_AI/mcp_server/engines/bpm_key_parser.py b/AbletonMCP_AI/mcp_server/engines/bpm_key_parser.py new file mode 100644 index 0000000..3a3ed2c --- /dev/null +++ b/AbletonMCP_AI/mcp_server/engines/bpm_key_parser.py @@ -0,0 +1,140 @@ +""" +Extract BPM and musical key from sample filenames. + +Covers naming conventions across multiple sample libraries: +- "98bpm yera drumloop.wav" +- "@16bloody - 98bpm vente .wav" +- "Midilatino_Sativa_A_Min_94BPM_Lead.wav" +- "SS_RNBL_Amor_Music_89_F_maj.wav" +- "90bpm reggaeton antiguo drumloop.wav" +- "(extra) 100bpm pop drumloop.wav" +- "Midilatino_Cupid_G#m_140BPM_Bass.wav" +- "LOOP 31 92bpm @dastin.prod.wav" +""" + +import re +from typing import Optional, Tuple +from pathlib import Path + +_NOTE_MAP = { + "c": 0, "c#": 1, "db": 1, "d": 2, "d#": 3, "eb": 3, + "e": 4, "f": 5, "f#": 6, "gb": 6, "g": 7, "g#": 8, + "ab": 8, "a": 9, "a#": 10, "bb": 10, "b": 11, +} + +_KEY_ALIASES = { + "cm": "Cm", "c#m": "C#m", "dbm": "Cm", "dm": "Dm", "ebm": "D#m", + "em": "Em", "fm": "Fm", "f#m": "F#m", "gbm": "F#m", "gm": "Gm", + "g#m": "G#m", "abm": "G#m", "am": "Am", "a#m": "A#m", "bbm": "A#m", "bm": "Bm", + "cmin": "Cm", "c#min": "C#m", "dmin": "Dm", "emin": "Em", + "fmin": "Fm", "f#min": "F#m", "gmin": "Gm", "g#min": "G#m", + "amin": "Am", "bmin": "Bm", "ebmin": "D#m", "bbmin": "A#m", + "dbmajor": "C#Maj", "ebmajor": "D#Maj", +} + + +def parse_bpm(filename: str) -> Optional[float]: + """Extract BPM from a filename. 
Returns None if not found.""" + name = Path(filename).stem + patterns = [ + re.compile(r"(\d{2,3})\s*bpm", re.IGNORECASE), + re.compile(r"bpm\s*(\d{2,3})", re.IGNORECASE), + re.compile(r"[_\s](\d{2,3})[_\s]", re.IGNORECASE), + re.compile(r"(\d{2,3})BPM", re.IGNORECASE), + ] + for pat in patterns: + m = pat.search(name) + if m: + val = float(m.group(1)) + if 40.0 <= val <= 300.0: + return val + nums = re.findall(r"(\d{2,3})", name) + for n in nums: + val = float(n) + if 60.0 <= val <= 200.0: + likely = any(kw in name.lower() for kw in [ + "bpm", "loop", "beat", "drum", "groove", "perc" + ]) + if likely: + return val + return None + + +def _normalize_key(note: str, quality: str) -> Optional[str]: + note_lower = note.lower().replace("\u266f", "#").replace("\u266d", "b") + semitone = _NOTE_MAP.get(note_lower) + if semitone is None: + return None + for name, val in _NOTE_MAP.items(): + if val == semitone: + if len(name) == 1: + root = name.upper() + else: + root = name[0].upper() + name[1:] + break + else: + root = note + return f"{root}m" if quality == "minor" else f"{root}Maj" + + +def parse_key(filename: str) -> Optional[str]: + """Extract musical key from a filename. 
Returns 'Am', 'C#m', 'FMaj', etc.""" + name = Path(filename).stem + + # Pattern 1: Note_Quality separated by underscores/dashes/dots + # Examples: A_Min, G#_Maj, F#_Min, C_minor, D#_m, E_maj + m = re.search( + r"[_\s\-\.]([A-Ga-g][#.\u266f\u266d]?)[_\s\-\.](Min|Maj|Major|Minor|min|maj|m|minor)[_\s\-\.]", + name, re.IGNORECASE + ) + if m: + note = m.group(1) + quality_raw = m.group(2).lower() + quality = "minor" if quality_raw.startswith("min") or quality_raw == "m" else "major" + return _normalize_key(note, quality) + + # Pattern 2: Compact form like Am, C#m, Gm, BbMaj + m = re.search(r"[_\s\-\.]([A-Ga-g][#.\u266f\u266d]?)(m|min|Maj|major|minor)[_\s\-\.Bb\d]", + name, re.IGNORECASE) + if m: + note = m.group(1) + quality_raw = m.group(2).lower() + quality = "minor" if quality_raw.startswith("m") and quality_raw != "maj" else "major" + if quality_raw in ("m", "min", "minor"): + quality = "minor" + return _normalize_key(note, quality) + + # Pattern 3: _Cmin, _F#min, _G#m (no separator after quality) + m = re.search(r"[_\s\-\.]([A-Ga-g][#.\u266f\u266d]?)(m|min|Maj|major|minor)(?:[_\s\-\.]|BPM|bpm|$)", + name, re.IGNORECASE) + if m: + note = m.group(1) + quality_raw = m.group(2).lower() + quality = "minor" if quality_raw in ("m", "min", "minor") else "major" + return _normalize_key(note, quality) + + # Pattern 4: SS_RNBL style - _F_maj, _C_min, _D#_Min + m = re.search(r"[_\s\-]([A-Ga-g][#.\u266f\u266d]?)_(maj|min|m|Maj|Min)[_\s\-\.]", + name, re.IGNORECASE) + if m: + note = m.group(1) + quality_raw = m.group(2).lower() + quality = "minor" if quality_raw in ("min", "m") else "major" + return _normalize_key(note, quality) + + # Pattern 5: Bare note name (less reliable, major by default) + m = re.search(r"[_\s]([A-Ga-g][#.\u266f\u266d]?)[_\s\-\.]", name) + if m: + bare = m.group(1) + root_lower = bare.lower().replace("\u266f", "#").replace("\u266d", "b") + if root_lower in _NOTE_MAP and len(bare) <= 2: + return _normalize_key(bare, "major") + + return None + + +def 
parse_sample_metadata(filename: str) -> dict: + return { + "bpm": parse_bpm(filename), + "key": parse_key(filename), + } \ No newline at end of file diff --git a/AbletonMCP_AI/mcp_server/engines/populate_bpm_from_filenames.py b/AbletonMCP_AI/mcp_server/engines/populate_bpm_from_filenames.py new file mode 100644 index 0000000..15af051 --- /dev/null +++ b/AbletonMCP_AI/mcp_server/engines/populate_bpm_from_filenames.py @@ -0,0 +1,200 @@ +""" +Populate BPM and key in sample_metadata.db from filenames. + +Uses bpm_key_parser to extract BPM and key from filenames, +then updates the SQLite database for all 511+ samples. + +Usage: + python populate_bpm_from_filenames.py +""" + +import sqlite3 +import os +import sys +from pathlib import Path + +DB_PATH = Path(r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton\sample_metadata.db") +LIBRERIA = Path(r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton") + +sys.path.insert(0, str(Path(__file__).parent)) +from bpm_key_parser import parse_bpm, parse_key + + +def update_existing_samples(): + conn = sqlite3.connect(str(DB_PATH)) + conn.row_factory = sqlite3.Row + c = conn.cursor() + + c.execute("SELECT path, bpm, key FROM samples") + rows = c.fetchall() + + updated_bpm = 0 + updated_key = 0 + skipped = 0 + + for row in rows: + path = row["path"] + current_bpm = row["bpm"] + current_key = row["key"] + + filename = os.path.basename(path) + + parsed_bpm = parse_bpm(filename) + parsed_key = parse_key(filename) + + updates = {} + if parsed_bpm and (current_bpm is None or current_bpm == 0.0): + updates["bpm"] = parsed_bpm + updated_bpm += 1 + if parsed_key and (current_key is None or current_key == "" or current_key == "C"): + updates["key"] = parsed_key + updated_key += 1 + + if updates: + set_clause = ", ".join(f"{k} = ?" 
for k in updates) + values = list(updates.values()) + [path] + c.execute(f"UPDATE samples SET {set_clause} WHERE path = ?", values) + else: + skipped += 1 + + conn.commit() + conn.close() + + print(f"Updated BPM: {updated_bpm}") + print(f"Updated key: {updated_key}") + print(f"Skipped (no parseable data): {skipped}") + + +def scan_and_add_new_samples(): + conn = sqlite3.connect(str(DB_PATH)) + conn.row_factory = sqlite3.Row + c = conn.cursor() + c.execute("SELECT path FROM samples") + existing = {row["path"] for row in c.fetchall()} + + added = 0 + for root, dirs, files in os.walk(str(LIBRERIA)): + for f in files: + if not f.lower().endswith(('.wav', '.aif', '.aiff', '.mp3')): + continue + + full_path = os.path.join(root, f) + rel_path = os.path.relpath(full_path, str(LIBRERIA)) + + if rel_path in existing: + continue + + parsed_bpm = parse_bpm(f) + parsed_key = parse_key(f) + + c.execute( + "INSERT OR IGNORE INTO samples (path, bpm, key, analyzed_at) VALUES (?, ?, ?, datetime('now'))", + (rel_path, parsed_bpm, parsed_key) + ) + + subfolder = os.path.dirname(rel_path).lower() + category = _infer_category(subfolder, f) + if category: + c.execute( + "INSERT OR IGNORE INTO sample_categories (path, category) VALUES (?, ?)", + (rel_path, category) + ) + + added += 1 + existing.add(rel_path) + + conn.commit() + conn.close() + + print(f"Added new samples: {added}") + + +def _infer_category(subfolder: str, filename: str) -> str: + subfolder_lower = subfolder.lower() + filename_lower = filename.lower() + + if "kick" in subfolder_lower or "kick" in filename_lower: + return "kick" + if "snare" in subfolder_lower or "snare" in filename_lower: + return "snare" + if "hi-hat" in subfolder_lower or "hihat" in subfolder_lower or "hi hat" in subfolder_lower: + return "hihat" + if "clap" in subfolder_lower or "clap" in filename_lower: + return "clap" + if "bass" in subfolder_lower or "bass" in filename_lower: + return "bass" + if "perc" in subfolder_lower or "perc" in filename_lower: 
+ return "perc" + if "drum" in subfolder_lower or "drumloop" in filename_lower or "loop" in filename_lower: + return "drumloops" + if "fx" in subfolder_lower or "effect" in subfolder_lower or "riser" in filename_lower or "impact" in filename_lower: + return "fx" + if "synth" in subfolder_lower or "synth" in filename_lower or "lead" in filename_lower: + return "synths" + if "melod" in subfolder_lower or "melody" in filename_lower: + return "melody" + if "one shot" in subfolder_lower or "oneshot" in subfolder_lower: + return "oneshots" + if "chord" in subfolder_lower or "chord" in filename_lower or "progres" in filename_lower: + return "chords" + if "pad" in subfolder_lower or "pad" in filename_lower: + return "pads" + if "guitar" in subfolder_lower or "guitar" in filename_lower: + return "guitar" + if "brass" in subfolder_lower or "brass" in filename_lower: + return "brass" + if "bell" in subfolder_lower or "bell" in filename_lower: + return "bells" + if "key" in subfolder_lower or "piano" in subfolder_lower: + return "keys" + if "voc" in subfolder_lower or "voc" in filename_lower: + return "vocals" + if "fill" in filename_lower: + return "drumloops" + + return "other" + + +def verify_results(): + conn = sqlite3.connect(str(DB_PATH)) + c = conn.cursor() + + c.execute("SELECT COUNT(*) FROM samples WHERE bpm > 0") + with_bpm = c.fetchone()[0] + c.execute("SELECT COUNT(*) FROM samples") + total = c.fetchone()[0] + c.execute("SELECT COUNT(*) FROM samples WHERE key IS NOT NULL AND key != '' AND key != 'C'") + with_key = c.fetchone()[0] + + print(f"\n--- DB Summary ---") + print(f"Total samples: {total}") + print(f"With BPM > 0: {with_bpm}") + print(f"With meaningful key: {with_key}") + + c.execute("SELECT path, bpm, key FROM samples WHERE bpm > 0 ORDER BY bpm") + print("\nSamples with BPM:") + for row in c.fetchall(): + print(f" {row[0]}: {row[1]} BPM, key={row[2]}") + + c.execute("SELECT COUNT(DISTINCT category) FROM sample_categories") + print(f"\nDistinct categories: 
{c.fetchone()[0]}") + + c.execute("SELECT category, COUNT(*) FROM sample_categories GROUP BY category ORDER BY COUNT(*) DESC") + print("\nCategory counts:") + for row in c.fetchall(): + print(f" {row[0]}: {row[1]}") + + conn.close() + + +if __name__ == "__main__": + print("Phase 1: Update existing samples with parsed BPM/key from filenames...") + update_existing_samples() + + print("\nPhase 2: Scan for new samples not yet in DB...") + scan_and_add_new_samples() + + print("\nPhase 3: Verify results...") + verify_results() + + print("\nDone!") \ No newline at end of file diff --git a/AbletonMCP_AI/mcp_server/engines/recategorize_samples.py b/AbletonMCP_AI/mcp_server/engines/recategorize_samples.py new file mode 100644 index 0000000..fdbaa05 --- /dev/null +++ b/AbletonMCP_AI/mcp_server/engines/recategorize_samples.py @@ -0,0 +1,380 @@ +""" +Recategorize ALL samples in sample_metadata.db with clean, normalized categories. + +Maps the messy folder-based categories (e.g. "LATINOS - DRUM LOOPS", "33 Instrumental Loops") +to clean pipeline-ready categories: kick, snare, hihat, clap, drumloops, bass, perc, +fx, impact, synth, keys, pad, vocals, oneshots, melody, chords, guitar, brass, bells, fill. + +Also adds MIDI files from SentimientoLatino2025 and reggaeton 3 to the DB. + +Usage: + python recategorize_samples.py +""" + +import os +import sys +import sqlite3 +from pathlib import Path + +DB_PATH = Path(r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton\sample_metadata.db") +LIBRERIA = Path(r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton") + +CLEAN_CATEGORIES = { + "kick", "snare", "hihat", "clap", "drumloops", "bass", "perc", + "fx", "impact", "synth", "keys", "pad", "vocals", "oneshots", + "melody", "chords", "guitar", "brass", "bells", "fill", "music_loop", +} + + +def _infer_clean_category(rel_path: str, filename: str) -> str: + """Infer a clean category from path and filename. 
+ + Priority: filename keywords > path keywords > folder name. + """ + path_lower = rel_path.lower().replace("\\", "/") + fn_lower = filename.lower() + + # --- Filename-based detection (highest priority) --- + + # Drum hits (oneshots) + if "kick" in fn_lower and "loop" not in fn_lower: + return "kick" + if "snare" in fn_lower and "loop" not in fn_lower: + return "snare" + if any(kw in fn_lower for kw in ("hi-hat", "hihat", "hi hat", "hh")): + if "loop" in fn_lower: + return "drumloops" + return "hihat" + if "clap" in fn_lower and "loop" not in fn_lower: + return "clap" + if "rim" in fn_lower and "loop" not in fn_lower: + return "perc" + + # Bass + if any(kw in fn_lower for kw in ("bass", "sub bass", "sub_", "reese", "resse", "808")): + if "loop" in fn_lower or "music" in path_lower: + return "music_loop" + return "bass" + + # FX and impacts + if any(kw in fn_lower for kw in ("impact", "camtazo", "hit")): + return "impact" + if any(kw in fn_lower for kw in ("riser", "sweep", "transition", "fx", "fx_")): + return "fx" + if "fill" in fn_lower: + return "fill" + + # Percussion loops / fills + if "perc" in fn_lower and ("loop" in fn_lower or path_lower.count("/") <= 2): + return "perc" + + # --- Path-based detection --- + + # reggaeton 3 specific folders + if "reggaeton 3" in path_lower: + if "/8. kicks" in path_lower: + return "kick" + if "/9. snare" in path_lower: + return "snare" + if "/10. percs" in path_lower: + return "perc" + if "/4. drum loops" in path_lower: + return "drumloops" + if "/5. fx" in path_lower: + return "fx" + if "/6. impact" in path_lower: + return "impact" + if "/7. fill" in path_lower: + return "fill" + if "/11. vocals" in path_lower: + return "vocals" + if "/3. 
one shots" in path_lower: + return "oneshots" + + # SentimientoLatino2025 /01/ specific folders + if "sentimientolatino2025" in path_lower: + if "drum loops" in path_lower: + return "drumloops" + if "one shots" in path_lower: + return "oneshots" + if "midi pack" in path_lower: + return "chords" + + # SentimientoLatino2025 /02/ specific folders + if "/02/" in path_lower and "sentimientolatino2025" in path_lower: + if "drum loops" in path_lower or "/23 " in path_lower: + return "drumloops" + if "music loops" in path_lower or "/07 " in path_lower: + return "music_loop" + if "instrumental loops" in path_lower or "/33 " in path_lower: + # Instrumental loops contain bass, keys, pads, etc + pass # fall through to filename analysis + if "one shots" in path_lower or "/20 " in path_lower: + return "oneshots" + if "vocals" in path_lower: + return "vocals" + + # --- Filename keyword-based for sample pack subfolders --- + + # Drum loops (filename patterns) + if "loop" in fn_lower and "drum" in path_lower: + return "drumloops" + if "loop" in fn_lower and "perc" in path_lower: + return "perc" + if any(kw in fn_lower for kw in ("drumloop", "drum_loop")): + return "drumloops" + + # SentimientoLatino2025 sample pack items - detect from filename keywords + if "_drums" in fn_lower: + return "drumloops" + if "_drum" in fn_lower: + return "drumloops" + if "_perc" in fn_lower: + return "perc" + if "_snare" in fn_lower: + return "snare" + + # Instruments + if any(kw in fn_lower for kw in ("chord", "bell_chord")): + return "chords" + if any(kw in fn_lower for kw in ("pad", "texture")): + return "pad" + if any(kw in fn_lower for kw in ("lead", "pluck")): + return "synth" + if any(kw in fn_lower for kw in ("arp", "arpeggio")): + return "synth" + if any(kw in fn_lower for kw in ("rhode", "rhodes", "piano", "keys")): + return "keys" + if any(kw in fn_lower for kw in ("guitar",)): + return "guitar" + if any(kw in fn_lower for kw in ("vocal", "vox", "voice")): + return "vocals" + if any(kw in 
fn_lower for kw in ("brass",)): + return "brass" + if any(kw in fn_lower for kw in ("bell", "mallet")): + return "bells" + if any(kw in fn_lower for kw in ("synth",)): + return "synth" + if any(kw in fn_lower for kw in ("cymatics", "fx", "transition", "riser")): + return "fx" + + # Main libreria folders + if path_lower.startswith("kick/"): + return "kick" + if path_lower.startswith("snare/"): + return "snare" + if "hi-hat" in path_lower or "hihat" in path_lower: + return "hihat" + if path_lower.startswith("drumloops/"): + return "drumloops" + if path_lower.startswith("perc loop/"): + return "perc" + if path_lower.startswith("bass/"): + return "bass" + if path_lower.startswith("fx/"): + return "fx" + if path_lower.startswith("oneshots/"): + return "oneshots" + + # Music loops + if "music" in path_lower and "loop" in path_lower: + return "music_loop" + + # Vocals + if "vocal" in path_lower: + return "vocals" + + # Instrumental loops - categorize by content + if "instrumental" in path_lower: + if "bass" in fn_lower: + return "bass" + if "pad" in fn_lower: + return "pad" + if "keys" in fn_lower: + return "keys" + if "fx" in fn_lower or "vocal" in fn_lower or "chop" in fn_lower: + return "fx" + return "synth" + + return "other" + + +def recategorize(): + conn = sqlite3.connect(str(DB_PATH)) + conn.row_factory = sqlite3.Row + c = conn.cursor() + + c.execute("SELECT path FROM samples") + rows = c.fetchall() + + # Clear all old categories + c.execute("DELETE FROM sample_categories") + + updated = 0 + category_counts = {} + + for row in rows: + path = row["path"] + filename = os.path.basename(path) + + category = _infer_clean_category(path, filename) + + c.execute( + "INSERT OR IGNORE INTO sample_categories (path, category) VALUES (?, ?)", + (path, category) + ) + + category_counts[category] = category_counts.get(category, 0) + 1 + updated += 1 + + conn.commit() + + print(f"Recategorized {updated} samples") + print("\nCategory distribution:") + for cat, count in 
sorted(category_counts.items(), key=lambda x: -x[1]): + print(f" {cat:15s}: {count:4d}") + + conn.close() + return category_counts + + +def add_midi_files(): + """Add MIDI files from SentimientoLatino2025 and reggaeton 3 to DB.""" + conn = sqlite3.connect(str(DB_PATH)) + c = conn.cursor() + + added = 0 + for root, dirs, files in os.walk(str(LIBRERIA)): + for f in files: + if not f.lower().endswith(('.mid', '.midi')): + continue + + full_path = os.path.join(root, f) + rel_path = os.path.relpath(full_path, str(LIBRERIA)) + + c.execute("SELECT 1 FROM samples WHERE path = ?", (rel_path,)) + if c.fetchone(): + continue + + # Parse BPM and key from MIDI filename + sys.path.insert(0, str(Path(__file__).parent)) + from bpm_key_parser import parse_bpm, parse_key + parsed_bpm = parse_bpm(f) + parsed_key = parse_key(f) + + c.execute( + "INSERT OR IGNORE INTO samples (path, bpm, key, analyzed_at) VALUES (?, ?, ?, datetime('now'))", + (rel_path, parsed_bpm, parsed_key) + ) + + # Infer category for MIDI + fn_lower = f.lower() + if "chord" in fn_lower or "progres" in fn_lower: + cat = "chords" + elif "arp" in fn_lower: + cat = "synth" + elif "bass" in fn_lower: + cat = "bass" + elif "drum" in fn_lower: + cat = "drumloops" + elif "lead" in fn_lower: + cat = "synth" + elif "melody" in fn_lower: + cat = "melody" + elif "pad" in fn_lower: + cat = "pad" + elif "piano" in fn_lower or "rhode" in fn_lower: + cat = "keys" + else: + cat = "chords" + + c.execute( + "INSERT OR IGNORE INTO sample_categories (path, category) VALUES (?, ?)", + (rel_path, cat) + ) + + added += 1 + + conn.commit() + conn.close() + print(f"Added {added} MIDI files to DB") + + +def verify(): + conn = sqlite3.connect(str(DB_PATH)) + c = conn.cursor() + + c.execute("SELECT COUNT(*) FROM samples") + total = c.fetchone()[0] + c.execute("SELECT COUNT(DISTINCT path) FROM sample_categories") + categorized = c.fetchone()[0] + c.execute("SELECT COUNT(*) FROM samples WHERE bpm > 0") + with_bpm = c.fetchone()[0] + 
c.execute("SELECT COUNT(*) FROM samples WHERE key IS NOT NULL AND key != ''") + with_key = c.fetchone()[0] + + print(f"\n{'='*50}") + print(f"DB Verification") + print(f"{'='*50}") + print(f"Total samples: {total}") + print(f"With categories: {categorized}") + print(f"With BPM > 0: {with_bpm}") + print(f"With key: {with_key}") + + # Show samples per source + c.execute(""" + SELECT + CASE + WHEN path LIKE 'SentimientoLatino%' THEN 'SentimientoLatino2025' + WHEN path LIKE 'reggaeton 3%' THEN 'reggaeton 3' + ELSE 'main library' + END as source, + COUNT(*) as count + FROM samples + GROUP BY source + """) + print("\nBy source:") + for row in c.fetchall(): + print(f" {row[0]:30s}: {row[1]:4d}") + + # Show category distribution by source + for source, pattern in [ + ("SentimientoLatino2025", "SentimientoLatino%"), + ("reggaeton 3", "reggaeton 3%"), + ("main library", "kick/%"), + ]: + if source == "main library": + print(f"\n{'-- main library categories --'}") + c.execute(""" + SELECT sc.category, COUNT(*) + FROM sample_categories sc + JOIN samples s ON sc.path = s.path + WHERE s.path NOT LIKE 'SentimientoLatino%' AND s.path NOT LIKE 'reggaeton 3%' + GROUP BY sc.category ORDER BY COUNT(*) DESC + """) + else: + print(f"\n{'-- ' + source + ' categories --'}") + c.execute(""" + SELECT sc.category, COUNT(*) + FROM sample_categories sc + JOIN samples s ON sc.path = s.path + WHERE s.path LIKE ? 
+ GROUP BY sc.category ORDER BY COUNT(*) DESC + """, (pattern,)) + for row in c.fetchall(): + print(f" {row[0]:15s}: {row[1]:4d}") + + conn.close() + + +if __name__ == "__main__": + print("Phase 1: Recategorize all samples with clean categories...") + recategorize() + + print("\nPhase 2: Add MIDI files to DB...") + add_midi_files() + + print("\nPhase 3: Verify...") + verify() + + print("\nDone!") diff --git a/AbletonMCP_AI/mcp_server/score_renderer.py b/AbletonMCP_AI/mcp_server/score_renderer.py index f258596..c3db8b5 100644 --- a/AbletonMCP_AI/mcp_server/score_renderer.py +++ b/AbletonMCP_AI/mcp_server/score_renderer.py @@ -2,33 +2,45 @@ score_renderer.py — Translates a SongScore into Ableton Live SESSION VIEW operations via TCP. Architecture: - - Each SectionDef in score.structure → one Ableton Scene - - Each TrackDef in score.tracks → one Ableton track - - Each ClipDef in a track → clip slot at (track_index, scene_index) + - Each SectionDef in score.structure -> one Ableton Scene + - Each TrackDef in score.tracks -> one Ableton track + - Each ClipDef in a track -> clip slot at (track_index, scene_index) Session View mapping: - section "Verse" → scene index 1 - section "Chorus" → scene index 2 + section "Verse" -> scene index 1 + section "Chorus" -> scene index 2 ... Clip placement (Session View only): - MIDI tracks: create_clip + add_notes_to_clip - Audio tracks: load_sample_to_clip (loads .wav into a clip slot) -Pattern generators (all computed on server side — no Ableton logic needed): +Pattern generators (all computed on server side -- no Ableton logic needed): MIDI drums: dembow_minimal, dembow_standard, dembow_double MIDI bass: bass_sub, bass_pluck, bass_octaves, bass_sustained MIDI harmony: chords_verse, chords_chorus, melody_simple + +Sample selection uses SampleMetadataStore for BPM/key-aware picking. +Auto-warp: samples whose BPM differs from project tempo get warp_clip_to_bpm. 
""" import json +import logging import os +import sqlite3 import socket from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple from score_engine import SongScore, TrackDef, ClipDef +logger = logging.getLogger("ScoreRenderer") + +DB_PATH = Path( + r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts" + r"\libreria\reggaeton\sample_metadata.db" +) + # ------------------------------------------------------------------ # Ableton TCP transport (self-contained — no FastMCP dependency) # ------------------------------------------------------------------ @@ -77,23 +89,64 @@ def _send(cmd_type: str, params: dict, timeout: float = 30.0) -> dict: # Sample resolution — "kick/auto" or "kick/kick_01.wav" → absolute path # ------------------------------------------------------------------ -# Keyword mapping: invented filenames → correct folder/auto paths +# Keyword mapping: invented filenames or short refs -> category/auto paths +# These map to BOTH folder names (main library) and DB categories (all libraries) _SAMPLE_KEYWORD_MAP = { "kick": "kick/auto", "snare": "snare/auto", - "hihat": "hi-hat (para percs normalmente)/auto", - "hi-hat": "hi-hat (para percs normalmente)/auto", - "hat": "hi-hat (para percs normalmente)/auto", + "hihat": "hihat/auto", + "hi-hat": "hihat/auto", + "hat": "hihat/auto", + "clap": "snare/auto", "drumloop": "drumloops/auto", + "drum_loop": "drumloops/auto", "drum": "drumloops/auto", - "perc": "perc loop/auto", + "perc": "perc/auto", "bass": "bass/auto", "fx": "fx/auto", "transition": "fx/auto", "transicion": "fx/auto", "riser": "fx/auto", - "impact": "fx/auto", + "impact": "impact/auto", "oneshot": "oneshots/auto", + "synth": "synth/auto", + "lead": "synth/auto", + "pad": "pad/auto", + "keys": "keys/auto", + "vocal": "vocals/auto", + "guitar": "guitar/auto", + "melody": "melody/auto", + "fill": "fill/auto", + "chord": "chords/auto", + "bell": "bells/auto", + "brass": 
"brass/auto", + "music_loop": "music_loop/auto", +} + +# Map from folder-based auto path prefix to DB category name +_FOLDER_TO_CATEGORY = { + "kick": "kick", + "snare": "snare", + "hihat": "hihat", + "hi-hat (para percs normalmente)": "hihat", + "drumloops": "drumloops", + "perc loop": "perc", + "bass": "bass", + "fx": "fx", + "oneshots": "oneshots", + "synth": "synth", + "pad": "pad", + "keys": "keys", + "vocals": "vocals", + "guitar": "guitar", + "melody": "melody", + "fill": "fill", + "chords": "chords", + "bells": "bells", + "brass": "brass", + "impact": "impact", + "music_loop": "music_loop", + "perc": "perc", } # Valid MIDI pattern names @@ -176,56 +229,102 @@ def _sanitize_pattern_name(pattern: str) -> str: return "dembow_standard" # default fallback -def _resolve_sample(sample_ref: str, lib_root: str, tempo: float = 95.0) -> Optional[str]: - """Resolve a sample reference to an absolute filesystem path. +def _resolve_sample(sample_ref: str, lib_root: str, tempo: float = 95.0, + key: str = "") -> Tuple[Optional[str], Optional[float]]: + """Resolve a sample reference to an absolute filesystem path and its native BPM. 
Formats accepted: - "kick/auto" → best WAV from /kick/ - "kick/kick 1.wav" → exact file /kick/kick 1.wav - "kick 1.wav" → sanitized to "kick/auto" - "/C:/absolute/path.wav" → passthrough + "kick/auto" -> best WAV from /kick/ (BPM/key matched) + "kick/kick 1.wav" -> exact file /kick/kick 1.wav + "kick 1.wav" -> sanitized to "kick/auto" + "/C:/absolute/path.wav" -> passthrough + + Returns: (absolute_path, sample_native_bpm_or_None) """ if not sample_ref: - return None + return None, None # Sanitize invented filenames sample_ref = _sanitize_sample_ref(sample_ref) # Already absolute if os.path.isabs(sample_ref): - return sample_ref if os.path.isfile(sample_ref) else None + if os.path.isfile(sample_ref): + sample_bpm = _lookup_sample_bpm(sample_ref, lib_root) + return sample_ref, sample_bpm + return None, None parts = sample_ref.replace("\\", "/").split("/") if parts[-1].lower() == "auto": folder = os.path.join(lib_root, *parts[:-1]) - return _pick_best(folder, tempo) + return _pick_best(folder, tempo, key) else: # Exact relative path path = os.path.join(lib_root, *parts) if os.path.isfile(path): - return path + sample_bpm = _lookup_sample_bpm(path, lib_root) + return path, sample_bpm # Fallback: auto-select from the folder folder = os.path.join(lib_root, *parts[:-1]) if len(parts) > 1 else lib_root - best = _pick_best(folder, tempo) + best, bpm = _pick_best(folder, tempo, key) if best: - return best + return best, bpm # Last resort: try keyword mapping on the whole ref sanitized = _sanitize_sample_ref(sample_ref) if sanitized != sample_ref: - return _resolve_sample(sanitized, lib_root, tempo) + return _resolve_sample(sanitized, lib_root, tempo, key) + return None, None + + +def _lookup_sample_bpm(abs_path: str, lib_root: str) -> Optional[float]: + """Look up the native BPM of a sample from the metadata DB.""" + if not DB_PATH.exists(): return None + try: + rel = os.path.relpath(abs_path, lib_root).replace("\\", "/") + except ValueError: + return None -def 
_pick_best(folder: str, tempo: float = 95.0) -> Optional[str]: - """Pick the best audio file from a folder. + try: + conn = sqlite3.connect(str(DB_PATH)) + conn.row_factory = sqlite3.Row + c = conn.cursor() + c.execute("SELECT bpm FROM samples WHERE path = ?", (rel,)) + row = c.fetchone() + conn.close() + if row and row["bpm"] and row["bpm"] > 0: + return row["bpm"] + except Exception: + pass - Strategy: - 1. Prefer files whose name contains a BPM number close to project tempo. - 2. If no BPM info available, return the first file alphabetically. + from engines.bpm_key_parser import parse_bpm + parsed = parse_bpm(os.path.basename(abs_path)) + return parsed + + +def _pick_best(folder: str, tempo: float = 95.0, key: str = "") -> Tuple[Optional[str], Optional[float]]: + """Pick the best audio file from a folder, using metadata DB when available. + + Strategy (with DB): + 1. Query SampleMetadataStore for samples in the folder's category + that match BPM (within +-5) and key (if specified). + 2. Score candidates: BPM proximity + key match. + 3. Prefer tempo-matched samples for loops; oneshots need no BPM match. + + Strategy (without DB / fallback): + 1. Parse filenames for BPM numbers close to project tempo. + 2. Return first file alphabetically if no BPM info. 
+ + Returns: (absolute_path, sample_bpm_or_None) """ + candidates = _pick_best_db(folder, tempo, key) + if candidates: + return candidates + if not os.path.isdir(folder): - return None + return None, None files = sorted([ os.path.join(folder, f) @@ -234,7 +333,7 @@ def _pick_best(folder: str, tempo: float = 95.0) -> Optional[str]: ]) if not files: - return None + return None, None def bpm_score(fpath: str) -> float: fname = os.path.basename(fpath).replace("-", " ").replace("_", " ") @@ -248,15 +347,133 @@ def _pick_best(folder: str, tempo: float = 95.0) -> Optional[str]: return 999.0 scores = [(bpm_score(f), f) for f in files] - best = min(scores, key=lambda x: x[0]) + best = min(scores, key=lambda x: x[0]) + best_path = best[1] if best[0] < 15.0 else files[0] - return best[1] if best[0] < 15.0 else files[0] + sample_bpm = None + if best[0] < 999.0: + sample_bpm = tempo - best[0] + + return best_path, sample_bpm + + +def _pick_best_db(folder: str, tempo: float, key: str) -> Optional[Tuple[str, Optional[float]]]: + """Try to pick best sample using the metadata DB with clean categories. + + Searches across ALL libraries (main, SentimientoLatino2025, reggaeton 3) + for samples matching the requested category, BPM, and key. 
+ """ + if not DB_PATH.exists(): + return None + + folder_name = os.path.basename(folder).lower() + category = _FOLDER_TO_CATEGORY.get(folder_name, folder_name) + + # Oneshot categories (no BPM matching needed) + is_oneshot = category in ("kick", "snare", "hihat", "clap", "oneshots", "impact", "fill") + + lib_root = str(DB_PATH.parent) + + try: + conn = sqlite3.connect(str(DB_PATH)) + conn.row_factory = sqlite3.Row + c = conn.cursor() + + if is_oneshot: + c.execute( + """SELECT s.path, s.bpm, s.key FROM samples s + INNER JOIN sample_categories sc ON s.path = sc.path + WHERE sc.category = ?""", + (category,) + ) + else: + bpm_min = tempo - 5 + bpm_max = tempo + 5 + c.execute( + """SELECT s.path, s.bpm, s.key FROM samples s + INNER JOIN sample_categories sc ON s.path = sc.path + WHERE sc.category = ? + AND ((s.bpm >= ? AND s.bpm <= ?) OR s.bpm = 0 OR s.bpm IS NULL)""", + (category, bpm_min, bpm_max) + ) + + rows = c.fetchall() + conn.close() + + if not rows: + return None + + candidates = [] + for row in rows: + rel_path = row["path"] + abs_path = os.path.join(lib_root, rel_path) + if not os.path.isfile(abs_path): + continue + + # Audio tracks: only return WAV/AIF/MP3, not MIDI files + ext = os.path.splitext(rel_path)[1].lower() + if ext in ('.mid', '.midi'): + continue + + sample_bpm = row["bpm"] if row["bpm"] else None + sample_key = row["key"] if row["key"] else "" + + score = 0.0 + + # BPM proximity scoring (higher weight than key) + if sample_bpm and sample_bpm > 0: + bpm_diff = abs(sample_bpm - tempo) + if bpm_diff <= 0.5: + score -= 20.0 + elif bpm_diff <= 1.5: + score -= 15.0 + elif bpm_diff <= 3: + score -= 10.0 + elif bpm_diff <= 5: + score -= 5.0 + elif bpm_diff <= 10: + score -= 1.0 + + # Key matching scoring + if key and sample_key: + key_lower = key.lower().replace("maj", "").replace("min", "m").strip() + sk_lower = sample_key.lower().replace("maj", "").replace("min", "m").strip() + if key_lower == sk_lower: + score -= 8.0 + elif key_lower.replace("m", 
"") == sk_lower.replace("m", ""): + score -= 3.0 + + # Prefer main library samples slightly (more tested) + if "sentimientolatino" in rel_path.lower() or "reggaeton 3" in rel_path.lower(): + score += 0.5 + + candidates.append((score, abs_path, sample_bpm)) + + if not candidates: + return None + + candidates.sort(key=lambda x: x[0]) + best = candidates[0] + + return (best[1], best[2]) + + except Exception as exc: + logger.warning("DB lookup failed for category '%s': %s", category, exc) + return None # ------------------------------------------------------------------ -# MIDI pattern generators — pure Python, no Ableton communication +# MIDI pattern generators — use engines.pattern_library (Fase 4) # ------------------------------------------------------------------ +try: + from engines.pattern_library import ( + BassPatterns, ChordProgressions, MelodyGenerator + ) + _PATTERN_LIB_AVAILABLE = True +except ImportError: + _PATTERN_LIB_AVAILABLE = False + _KEY_ROOTS: Dict[str, int] = { "C": 48, "C#": 49, "Db": 49, "D": 50, "D#": 51, "Eb": 51, @@ -265,21 +482,33 @@ _KEY_ROOTS: Dict[str, int] = { "G": 55, "G#": 56, "Ab": 56, "A": 57, "A#": 58, "Bb": 58, "B": 59, - # Minor keys "Am": 45, "Dm": 38, "Em": 40, "Bm": 47, "F#m": 54, "C#m": 49, "Gm": 43, "Fm": 41, } def _root(key: str) -> int: - return _KEY_ROOTS.get(key, 45) # Default Am root + return _KEY_ROOTS.get(key, 45) + + +def _extract_root_note(key: str) -> str: + """Extract root note letter from key string: 'Am' -> 'A', 'F#m' -> 'F#', 'C' -> 'C'.""" + root = "" + for ch in key: + if ch.isalpha(): + root += ch + elif ch == "#": + root += ch + else: + break + return root if root else "A" def _gen_dembow(bars: int, variation: str, key: str) -> List[Dict]: """Dembow drum pattern on MIDI note 36 (kick).""" - bpb = 4 - total = bars * bpb - notes = [] + bpb = 4 + total = bars * bpb + notes = [] patterns = { "minimal": [0.0, 2.5], "standard": [0.0, 1.5, 2.0, 2.5, 3.0, 3.5], @@ -299,11 +528,30 @@ def _gen_dembow(bars: int, 
variation: str, key: str) -> List[Dict]: def _gen_bass(bars: int, style: str, key: str) -> List[Dict]: - """Sub-bass MIDI patterns.""" - root = _root(key) - bpb = 4 - notes = [] + """Bass MIDI patterns. Uses BassPatterns engine when available.""" + if _PATTERN_LIB_AVAILABLE: + style_map = { + "bass_sub": "sub", + "bass_pluck": "pluck", + "bass_octaves": "sub", + "bass_sustained": "sustained", + } + lib_style = style_map.get(style, "sub") + try: + events = BassPatterns.get_bass_line( + bars=bars, key=key, style=lib_style + ) + return [ + {"pitch": e.pitch, "start_time": e.start_time, + "duration": e.duration, "velocity": e.velocity} + for e in events + ] + except Exception as exc: + logger.warning("BassPatterns failed, using fallback: %s", exc) + root = _root(key) + bpb = 4 + notes = [] for bar in range(bars): b = bar * bpb if style == "bass_sub": @@ -330,18 +578,37 @@ def _gen_bass(bars: int, style: str, key: str) -> List[Dict]: ) else: notes.append({"pitch": root - 12, "start_time": b, "duration": 0.5, "velocity": 100}) - return notes def _gen_chords(bars: int, style: str, key: str) -> List[Dict]: - """Chord voicing patterns.""" - root = _root(key) - bpb = 4 - notes = [] + """Chord voicing patterns. 
Uses ChordProgressions engine when available.""" + if _PATTERN_LIB_AVAILABLE: + prog_map = { + "chords_verse": "i-VI-III-VII", + "chords_chorus": "vi-IV-I-V", + } + prog_name = prog_map.get(style, "i-VI-III-VII") + try: + chords = ChordProgressions.get_progression(prog_name, key=key, bars=bars) + notes = [] + for chord in chords: + for pitch in chord["notes"]: + notes.append({ + "pitch": pitch, + "start_time": chord["start_beat"], + "duration": chord["duration"] - 0.25, + "velocity": 72, + }) + return notes + except Exception as exc: + logger.warning("ChordProgressions failed, using fallback: %s", exc) - PROG_VERSE = [(0, 3, 7), (-5, -2, 2), (-3, 0, 4), (-7, -4, 0)] - PROG_CHORUS = [(0, 3, 7), (-3, 0, 4), (5, 8, 12), (0, 3, 7)] + root = _root(key) + bpb = 4 + notes = [] + PROG_VERSE = [(0, 3, 7), (-5, -2, 2), (-3, 0, 4), (-7, -4, 0)] + PROG_CHORUS = [(0, 3, 7), (-3, 0, 4), (5, 8, 12), (0, 3, 7)] prog = PROG_VERSE if "verse" in style else PROG_CHORUS for bar in range(bars): @@ -349,20 +616,32 @@ def _gen_chords(bars: int, style: str, key: str) -> List[Dict]: start = float(bar * bpb) for interval in chord_intervals: notes.append({ - "pitch": root + interval, + "pitch": root + interval, "start_time": start, - "duration": float(bpb) - 0.25, - "velocity": 72, + "duration": float(bpb) - 0.25, + "velocity": 72, }) - return notes def _gen_melody_simple(bars: int, key: str) -> List[Dict]: - """Simple pentatonic melodic line.""" - root = _root(key) + """Melodic line. 
Uses MelodyGenerator engine when available.""" + if _PATTERN_LIB_AVAILABLE: + try: + events = MelodyGenerator.generate_melody( + bars=bars, key=key, density=0.5 + ) + return [ + {"pitch": e.pitch, "start_time": e.start_time, + "duration": e.duration, "velocity": e.velocity} + for e in events + ] + except Exception as exc: + logger.warning("MelodyGenerator failed, using fallback: %s", exc) + + root = _root(key) scale = [0, 3, 5, 7, 10, 12] - bpb = 4 + bpb = 4 notes = [] rhythm = [0.0, 0.75, 1.5, 2.0, 2.75, 3.0, 3.5] @@ -375,7 +654,58 @@ def _gen_melody_simple(bars: int, key: str) -> List[Dict]: return notes -# Registry: pattern_name → generator(bars, key) → List[Dict] +def _detect_key_from_drumloops(tracks: List[TrackDef], lib_root: str, + tempo: float, default_key: str) -> str: + """Detect the musical key from the first drumloop sample found. + + When a drumloop has key metadata (from filename or DB), use it as the + project key for MIDI pattern transposition (Fase 5). + + Returns the detected key or default_key if none found. 
+ """ + for track in tracks: + if track.type != "audio": + continue + for clip in track.clips: + if not clip.sample: + continue + sample_ref = _sanitize_sample_ref(clip.sample) + is_loop = any(kw in sample_ref.lower() for kw in + ("drumloop", "loop", "perc loop", "drum")) + if not is_loop: + continue + + abs_path, _ = _resolve_sample(sample_ref, lib_root, tempo) + if not abs_path: + continue + + if DB_PATH.exists(): + try: + rel = os.path.relpath(abs_path, lib_root).replace("\\", "/") + conn = sqlite3.connect(str(DB_PATH)) + conn.row_factory = sqlite3.Row + c = conn.cursor() + c.execute("SELECT key FROM samples WHERE path = ?", (rel,)) + row = c.fetchone() + conn.close() + if row and row["key"] and row["key"].strip(): + db_key = row["key"].strip() + if db_key != "C" and len(db_key) >= 2: + logger.info("Detected key '%s' from drumloop: %s", db_key, os.path.basename(abs_path)) + return db_key + except Exception: + pass + + from engines.bpm_key_parser import parse_key + fn_key = parse_key(os.path.basename(abs_path)) + if fn_key and fn_key != "C" and len(fn_key) >= 2: + logger.info("Detected key '%s' from drumloop filename: %s", fn_key, os.path.basename(abs_path)) + return fn_key + + return default_key + + +# Registry: pattern_name -> generator(bars, key) -> List[Dict] PATTERN_GENERATORS: Dict = { "dembow_minimal": lambda bars, key: _gen_dembow(bars, "minimal", key), "dembow_standard": lambda bars, key: _gen_dembow(bars, "standard", key), @@ -622,6 +952,14 @@ class ScoreRenderer: key = score.meta.get("key", "Am") tempo = score.meta.get("tempo", 95.0) + # Fase 5: Detect key from the first drumloop loaded and use it for MIDI. + # This ensures MIDI patterns follow the drumloop's harmonic context. 
+ detected_key = _detect_key_from_drumloops(score.tracks, self.lib_root, tempo, key) + if detected_key != key: + logger.info("Harmonic coherence: overriding key %s -> %s (from drumloop)", key, detected_key) + result["detected_key"] = detected_key + key = detected_key + for track in score.tracks: if track.id not in track_index_map: continue @@ -641,15 +979,19 @@ class ScoreRenderer: clip_label = "%s_%s" % (section_name or "clip", track.id) if track.type == "audio": - self._place_audio_clip(t_idx, scene_idx, clip, clip_label, tempo, result) + self._place_audio_clip(t_idx, scene_idx, clip, clip_label, tempo, key, result) else: self._place_midi_clip(t_idx, scene_idx, clip, clip_label, key, result) def _place_audio_clip(self, track_idx: int, scene_idx: int, clip: ClipDef, label: str, - tempo: float, result: dict) -> None: - """Load an audio sample into a Session View clip slot.""" - sample_path = _resolve_sample(clip.sample, self.lib_root, tempo) + tempo: float, key: str, result: dict) -> None: + """Load an audio sample into a Session View clip slot. + + If the sample's native BPM differs from the project tempo, auto-warp + is applied using warp_clip_to_bpm (Fase 3). 
+ """ + sample_path, sample_bpm = _resolve_sample(clip.sample, self.lib_root, tempo, key) if not sample_path: result["errors"].append( "Clip '%s': sample '%s' not found (lib_root=%s)" @@ -664,23 +1006,45 @@ class ScoreRenderer: "warp": clip.warp, }, timeout=30.0) - if resp.get("status") == "success" or resp.get("loaded"): - result["clips_created"] += 1 - else: + if resp.get("status") != "success" or not (resp.get("loaded") or resp.get("status") == "success"): resp2 = _send("load_sample_to_clip", { "track_index": track_idx, "clip_index": scene_idx, "sample_path": sample_path, }, timeout=30.0) - if resp2.get("status") == "success" or resp2.get("loaded"): - result["clips_created"] += 1 - else: + if not (resp2.get("status") == "success" or resp2.get("loaded")): result["errors"].append( "Audio clip '%s' failed: primary=%s fallback=%s path=%s" % (label, resp.get("error", resp.get("message", "?")), resp2.get("error", resp2.get("message", "?")), sample_path) ) + return + + result["clips_created"] += 1 + + # Auto-warp (Fase 3): if sample has a known BPM that differs from project + if sample_bpm and sample_bpm > 0 and abs(sample_bpm - tempo) > 1.5: + warp_resp = _send("warp_clip_to_bpm", { + "track_index": track_idx, + "clip_index": scene_idx, + "original_bpm": sample_bpm, + "target_bpm": tempo, + }, timeout=20.0) + if warp_resp.get("status") == "success": + logger.info( + "Auto-warped '%s': %.1f -> %.1f BPM", + label, sample_bpm, tempo + ) + result.setdefault("warped", []).append({ + "clip": label, + "from_bpm": sample_bpm, + "to_bpm": tempo, + }) + else: + logger.warning( + "Auto-warp failed for '%s': %s", label, warp_resp.get("message", "?") + ) def _place_midi_clip(self, track_idx: int, scene_idx: int, clip: ClipDef, label: str, diff --git a/AbletonMCP_AI/mcp_server/scores/glm_test_song.json b/AbletonMCP_AI/mcp_server/scores/glm_test_song.json new file mode 100644 index 0000000..dff2ddb --- /dev/null +++ b/AbletonMCP_AI/mcp_server/scores/glm_test_song.json @@ -0,0 +1,768 
@@ +{ + "meta": { + "title": "Luna de Miel en el Block", + "tempo": 92, + "key": "Dm", + "genre": "reggaeton", + "time_signature": "4/4", + "gap_bars": 2.0, + "version": "1.0", + "created_at": "2026-04-14T15:32:00.103065" + }, + "structure": [ + { + "name": "Intro", + "start_bar": 0.0, + "duration_bars": 8.0 + }, + { + "name": "Verse A", + "start_bar": 10.0, + "duration_bars": 16.0 + }, + { + "name": "Pre-Chorus", + "start_bar": 28.0, + "duration_bars": 8.0 + }, + { + "name": "Chorus A", + "start_bar": 38.0, + "duration_bars": 16.0 + }, + { + "name": "Verse B", + "start_bar": 56.0, + "duration_bars": 16.0 + }, + { + "name": "Chorus B", + "start_bar": 74.0, + "duration_bars": 16.0 + }, + { + "name": "Bridge", + "start_bar": 92.0, + "duration_bars": 8.0 + }, + { + "name": "Chorus C", + "start_bar": 102.0, + "duration_bars": 16.0 + }, + { + "name": "Outro", + "start_bar": 120.0, + "duration_bars": 8.0 + } + ], + "tracks": [ + { + "id": "kick_main", + "name": "Kick Principal", + "type": "audio", + "clips": [ + { + "start_bar": 0.0, + "duration_bars": 8.0, + "section": "Intro", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 10.0, + "duration_bars": 16.0, + "section": "Verse A", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 56.0, + "duration_bars": 16.0, + "section": "Verse B", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 92.0, + "duration_bars": 8.0, + "section": "Bridge", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + 
"duration_bars": 16.0, + "section": "Chorus C", + "sample": "kick/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 120.0, + "duration_bars": 8.0, + "section": "Outro", + "sample": "kick/auto", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.9, + "pan": 0.0, + "eq_preset": "kick" + }, + "instrument": "Wavetable" + }, + { + "id": "snare_main", + "name": "Snare Reggaeton", + "type": "audio", + "clips": [ + { + "start_bar": 0.0, + "duration_bars": 8.0, + "section": "Intro", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 10.0, + "duration_bars": 16.0, + "section": "Verse A", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 56.0, + "duration_bars": 16.0, + "section": "Verse B", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 92.0, + "duration_bars": 8.0, + "section": "Bridge", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "snare/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 120.0, + "duration_bars": 8.0, + "section": "Outro", + "sample": "snare/auto", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.85, + "pan": 0.0, + "eq_preset": "snare" + }, + "instrument": "Wavetable" + }, + { + "id": "hihat_perc", + "name": "Hi-Hat y Percusion", + "type": "audio", + "clips": [ + { + "start_bar": 10.0, + "duration_bars": 16.0, + "section": "Verse A", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": 
true, + "warp": true + }, + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 56.0, + "duration_bars": 16.0, + "section": "Verse B", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 92.0, + "duration_bars": 8.0, + "section": "Bridge", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 120.0, + "duration_bars": 8.0, + "section": "Outro", + "sample": "hi-hat (para percs normalmente)/auto", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.7, + "pan": 0.15, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "dembow_pattern", + "name": "Dembow MIDI", + "type": "audio", + "clips": [ + { + "start_bar": 10.0, + "duration_bars": 16.0, + "section": "Verse A", + "sample": "dembow_standard", + "loop": true, + "warp": true + }, + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "dembow_double", + "loop": true, + "warp": true + }, + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "dembow_double", + "loop": true, + "warp": true + }, + { + "start_bar": 56.0, + "duration_bars": 16.0, + "section": "Verse B", + "sample": "dembow_standard", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + 
"sample": "dembow_double", + "loop": true, + "warp": true + }, + { + "start_bar": 92.0, + "duration_bars": 8.0, + "section": "Bridge", + "sample": "dembow_minimal", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "dembow_double", + "loop": true, + "warp": true + }, + { + "start_bar": 120.0, + "duration_bars": 8.0, + "section": "Outro", + "sample": "dembow_minimal", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.75, + "pan": -0.1, + "eq_preset": "snare" + }, + "instrument": "Operator" + }, + { + "id": "perc_loop_main", + "name": "Perc Loop Tropical", + "type": "audio", + "clips": [ + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "perc loop/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "perc loop/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "perc loop/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "perc loop/auto", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.55, + "pan": 0.3, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "bass_sub", + "name": "Bass Sub Oscuro", + "type": "audio", + "clips": [ + { + "start_bar": 10.0, + "duration_bars": 16.0, + "section": "Verse A", + "sample": "bass_sub", + "loop": true, + "warp": true + }, + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "bass_sub", + "loop": true, + "warp": true + }, + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "bass_octaves", + "loop": true, + "warp": true + }, + { + "start_bar": 56.0, + "duration_bars": 16.0, + "section": "Verse B", + "sample": "bass_sub", + "loop": true, + "warp": true + }, + { + 
"start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "bass_octaves", + "loop": true, + "warp": true + }, + { + "start_bar": 92.0, + "duration_bars": 8.0, + "section": "Bridge", + "sample": "bass_sustained", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "bass_octaves", + "loop": true, + "warp": true + }, + { + "start_bar": 120.0, + "duration_bars": 8.0, + "section": "Outro", + "sample": "bass_sub", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.8, + "pan": 0.0, + "eq_preset": "bass" + }, + "instrument": "Operator" + }, + { + "id": "bass_pluck_hit", + "name": "Bass Pluck Accento", + "type": "audio", + "clips": [ + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "bass_pluck", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "bass_pluck", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "bass_pluck", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.45, + "pan": -0.2, + "eq_preset": "bass" + }, + "instrument": "Wavetable" + }, + { + "id": "bass_audio_layer", + "name": "Bass Audio Capa", + "type": "audio", + "clips": [ + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "bass/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "bass/auto", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "bass/auto", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.35, + "pan": 0.1, + "eq_preset": "bass" + }, + "instrument": "Wavetable" + }, + { + "id": "chords_verse_midi", + "name": "Acordes Verso", + "type": "audio", + "clips": [ + { + "start_bar": 10.0, + 
"duration_bars": 16.0, + "section": "Verse A", + "sample": "chords_verse", + "loop": true, + "warp": true + }, + { + "start_bar": 56.0, + "duration_bars": 16.0, + "section": "Verse B", + "sample": "chords_verse", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.4, + "pan": -0.3, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "chords_chorus_midi", + "name": "Acordes Coro", + "type": "audio", + "clips": [ + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "chords_chorus", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "chords_chorus", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "chords_chorus", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.5, + "pan": -0.25, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "melody_main", + "name": "Melodia Principal", + "type": "audio", + "clips": [ + { + "start_bar": 38.0, + "duration_bars": 16.0, + "section": "Chorus A", + "sample": "melody_simple", + "loop": true, + "warp": true + }, + { + "start_bar": 74.0, + "duration_bars": 16.0, + "section": "Chorus B", + "sample": "melody_simple", + "loop": true, + "warp": true + }, + { + "start_bar": 102.0, + "duration_bars": 16.0, + "section": "Chorus C", + "sample": "melody_simple", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.55, + "pan": 0.2, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "drumloop_intro", + "name": "Drum Loop Intro", + "type": "audio", + "clips": [ + { + "start_bar": 0.0, + "duration_bars": 8.0, + "section": "Intro", + "sample": "drumloops/auto", + "loop": true, + "warp": true + } + ], + "mixer": { + "volume": 0.6, + "pan": 0.0, + "eq_preset": "snare" + }, + "instrument": "Wavetable" + }, + { + "id": "fx_transition_1", + "name": "FX Transicion 1", 
+ "type": "audio", + "clips": [ + { + "start_bar": 28.0, + "duration_bars": 8.0, + "section": "Pre-Chorus", + "sample": "fx/auto", + "loop": false, + "warp": true + } + ], + "mixer": { + "volume": 0.5, + "pan": 0.0, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "fx_transition_2", + "name": "FX Transicion 2", + "type": "audio", + "clips": [ + { + "start_bar": 92.0, + "duration_bars": 8.0, + "section": "Bridge", + "sample": "fx/auto", + "loop": false, + "warp": true + } + ], + "mixer": { + "volume": 0.5, + "pan": 0.0, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + }, + { + "id": "fx_outro_riser", + "name": "FX Outro Riser", + "type": "audio", + "clips": [ + { + "start_bar": 120.0, + "duration_bars": 8.0, + "section": "Outro", + "sample": "fx/auto", + "loop": false, + "warp": true + } + ], + "mixer": { + "volume": 0.45, + "pan": 0.0, + "eq_preset": "synth" + }, + "instrument": "Wavetable" + } + ] +} \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index 08b10a3..5227fb0 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,38 +1,50 @@ -# CLAUDE.md - AbletonMCP_AI v2.0 +# CLAUDE.md - AbletonMCP_AI v3.2 > **Canonical project context** for AI agents. > Read this BEFORE doing any work. ## CRITICAL RULES -1. **NEVER touch `libreria/` or `librerias/`** - User's 509 reggaeton samples. +1. **NEVER touch `libreria/` or `librerias/`** - User's sample library. 2. **NEVER delete project files** - Overwrite only. 3. **NEVER create debug .md files in project root** - All in `AbletonMCP_AI/docs/`. 4. **ALWAYS compile after changes**: `python -m py_compile ""` 5. **ALWAYS restart Ableton** after changes to `__init__.py`. -6. **Use PowerShell, absolute Windows paths**. +6. **STRICT SESSION VIEW ONLY** - Arrangement View is discarded for production. 
## Architecture ``` AbletonMCP_AI/ -├── __init__.py # Remote Script (all-in-one, ~300 lines) -├── README.md # Documentation -├── docs/ # Sprints +├── __init__.py # Remote Script (All-in-one API) +├── docs/ # Sprints & SYSTEM_SCORE_RENDER.md └── mcp_server/ - ├── server.py # MCP server (~300 lines) - └── engines/ # Music logic + ├── server.py # MCP Server (130+ tools) + ├── score_engine.py # [NEW] Pure Python song data model + ├── score_renderer.py # [NEW] Session View renderer + ├── ai_loop.py # [NEW] Autonomous production loop + └── scores/ # [NEW] JSON song storage ``` +## Primary Workflow (Score → Render) + +The preferred way to produce music is the **Compose-then-Render** pipeline: + +1. **Compose**: Use `compose_from_template` or incremental `new_score` + `compose_*` tools. +2. **Review**: Use `get_score` to see the JSON structure. +3. **Save**: Use `save_score` to persist the song in `mcp_server/scores/`. +4. **Render**: Use `render_score` to inject the JSON into Ableton's Session View. +5. **Batch**: Use `render_all_scores` to produce multiple songs at once. + ## How It Works -1. **Ableton** loads `__init__.py` as a Control Surface -2. **Remote Script** starts TCP server on port 9877 -3. **MCP Server** (FastMCP over stdio) connects to Ableton via TCP -4. **OpenCode/opencode** sends tool calls to MCP Server via stdio +1. **Ableton** starts TCP server (9877). +2. **MCP tools** build a `SongScore` object in memory. +3. **Renderer** translates JSON sections to **Scenes** and definitions to **Clip Slots**. +4. **Patterns** (Dembow, Bass, etc.) are resolved server-side into MIDI notes. ## Workflow -- **Kimi** codes fast, implements features -- **Qwen** verifies, compiles, debugs, creates next sprint -- Sprints saved to `docs/` +- **Kimi** codes fast, implements features. +- **Qwen** verifies, compiles, debugs, creates next sprint. +- Refer to `docs/SYSTEM_SCORE_RENDER.md` for full technical details.
diff --git a/QWEN.md b/QWEN.md index e93325f..a0e22ee 100644 --- a/QWEN.md +++ b/QWEN.md @@ -1,7 +1,7 @@ -# QWEN.md - AbletonMCP_AI v3.0 (Senior Architecture) +# QWEN.md - AbletonMCP_AI v3.2 (Score → Render) > **Context**: MCP-based system for controlling Ableton Live 12 from AI agents. -> **Architecture**: Senior v3.0 (Arrangement-first workflow). +> **Architecture**: Compose-then-Render v3.2 (**STRICT SESSION VIEW**). > **Team**: Qwen (verify/debug/architecture) + Kimi (fast coding). ## CRITICAL RULES (READ FIRST) @@ -9,7 +9,7 @@ 1. **NEVER touch `libreria/` or `librerias/`** - User's sample library. NEVER delete, move, or modify. These are read-only. 2. **NEVER delete project files** - Overwrite, don't delete then create. 3. **NEVER create debug .md files in project root** - All docs go in `AbletonMCP_AI/docs/`. -4. **NEVER use `rmdir /s /q` except for `__pycache__`** - Can accidentally delete the whole project. +4. **STRICT SESSION VIEW ONLY** - Arrangement View and its commands (`create_arrangement_*`) are DISCARDED for this sprint. All production goes to scenes and clip slots. 5. **NEVER modify Ableton's built-in scripts** - `_Framework`, `_APC`, `_Komplete_Kontrol`, etc. are not yours. 6. **ALWAYS compile after changes**: `python -m py_compile ""` 7. **ALWAYS restart Ableton Live** after changes to `__init__.py` (no hot-reload for Remote Scripts). @@ -23,32 +23,27 @@ ``` AI Agent (OpenCode/Claude/Kimi) ↓ Natural language prompts -MCP Server (FastMCP, stdio transport) +SongScore Engine (Pure Python Data Model) + ↓ JSON score representation +Score Renderer (Session View Translator) ↓ JSON commands via TCP socket -50+ Production Engines (drums, bass, melody, mixing, etc.) 
- ↓ Real-time clip creation LiveBridge (TCP → Ableton Live API) ↓ -Ableton Live 12 Suite → Arrangement View +Ableton Live 12 Suite → Session View Scenes & Clip Slots ``` ### Key Architecture Components | Component | File | Purpose | |-----------|------|---------| -| **Remote Script** | `AbletonMCP_AI/__init__.py` | Ableton Control Surface (~9752 lines). Starts TCP server on port 9877. Handles all Live API calls. | -| **MCP Server** | `AbletonMCP_AI/mcp_server/server.py` | FastMCP server (~6745 lines). Defines 114+ MCP tools. Communicates with Ableton via TCP. | -| **BPM Analyzer** | `AbletonMCP_AI/mcp_server/engines/bpm_analyzer.py` | Librosa-based BPM detection for 800+ samples. | -| **Spectral Coherence** | `AbletonMCP_AI/mcp_server/engines/spectral_coherence.py` | MFCC embeddings for sample similarity. | -| **Session Orchestrator** | `AbletonMCP_AI/mcp_server/engines/session_orchestrator.py` | MIDI instrument validation and auto-loading. | -| **Launcher** | `mcp_wrapper.py` | Entry point for MCP stdio transport. Imports and runs the server. | -| **Integration** | `AbletonMCP_AI/mcp_server/integration.py` | Senior Architecture coordinator. Wires all components together. | -| **LiveBridge** | `AbletonMCP_AI/mcp_server/engines/live_bridge.py` | Direct Ableton Live API execution. Creates clips, writes automation, routes tracks. | -| **Arrangement Recorder** | `AbletonMCP_AI/mcp_server/engines/arrangement_recorder.py` | State machine for Session→Arrangement recording. 7 states, musical quantization. | -| **Metadata Store** | `AbletonMCP_AI/mcp_server/engines/metadata_store.py` | SQLite database of pre-analyzed sample features. No numpy required for queries. | -| **Sample Selector** | `AbletonMCP_AI/mcp_server/engines/sample_selector.py` | Smart sample selection with coherence scoring. | -| **Mixing Engine** | `AbletonMCP_AI/mcp_server/engines/mixing_engine.py` | Professional mixing chains (EQ, compression, bus routing). 
| -| **Song Generator** | `AbletonMCP_AI/mcp_server/engines/song_generator.py` | Track generation from prompts. | +| **Remote Script** | `AbletonMCP_AI/__init__.py` | Ableton Control Surface. TCP server on port 9877. Handles all Live API calls. | +| **Score Engine** | `mcp_server/score_engine.py` | [Sprint 9] JSON data model for songs. Decoupled from Ableton logic. | +| **Score Renderer** | `mcp_server/score_renderer.py` | [Sprint 9] Translates JSON Score to Session View Scenes/Clips. | +| **AI Loop** | `mcp_server/ai_loop.py` | [Sprint 9] Autonomous production loop (Anthropic-compatible). | +| **Metadata Store** | `mcp_server/engines/metadata_store.py` | SQLite database of pre-analyzed sample features. No numpy required for queries. | +| **Sample Selector** | `mcp_server/engines/sample_selector.py` | Smart sample selection with coherence scoring. | +| **Mixing Engine** | `mcp_server/engines/mixing_engine.py` | Professional mixing chains (EQ, compression). | +| **LiveBridge** | `mcp_server/engines/live_bridge.py` | Direct Ableton Live API execution engine. | ### Directory Structure @@ -62,22 +57,12 @@ MIDI Remote Scripts/ │ ├── examples/ # Usage examples │ ├── presets/ # Saved configurations (.json) │ └── mcp_server/ -│ ├── server.py # MCP FastMCP server -│ ├── integration.py # Senior Architecture coordinator -│ ├── test_arrangement.py # Verification tests -│ └── engines/ # 65+ production engines -│ ├── sample_selector.py -│ ├── song_generator.py -│ ├── arrangement_recorder.py -│ ├── live_bridge.py -│ ├── mixing_engine.py -│ ├── metadata_store.py -│ ├── massive_selector.py -│ ├── coherence_system.py -│ ├── bpm_analyzer.py # Sprint 7: Librosa BPM detection -│ ├── spectral_coherence.py # Sprint 7: MFCC embeddings -│ └── session_orchestrator.py # Sprint 7: MIDI validation -│ └── ... 
(50+ more) +│ ├── server.py # MCP FastMCP server (130+ tools) +│ ├── score_engine.py # SongScore model +│ ├── score_renderer.py # Session View renderer +│ ├── ai_loop.py # AI production loop +│ ├── scores/ # [NEW] JSON songs folder +│ └── engines/ # Specialized production engines ├── libreria/ # User samples (READ-ONLY, git-ignored) ├── librerias/ # Organized samples (READ-ONLY, git-ignored) ├── mcp_wrapper.py # MCP server launcher @@ -214,11 +199,14 @@ Primary production workflow: - `validate_session` - Verify MIDI tracks have instruments - `fix_session_midi_tracks` - Auto-load instruments by track name -### Advanced -- `create_riser` / `create_downlifter` / `create_impact` - FX generation -- `automate_filter` / `generate_curve_automation` - Parameter automation -- `humanize_track` - Velocity/timing variations -- `apply_professional_mix` - Complete mix chain +### Score → Render Pipeline (Sprint 9) +- `new_score` / `get_score` - Score lifecycle +- `compose_from_template` - Quick song generation +- `compose_audio_track` / `compose_midi_track` - Direct composition +- `compose_pattern` - MIDI pattern application +- `save_score` / `load_score` - JSON persistence +- `render_score` - Inject score into Session View (Scene-by-scene) +- `render_all_scores` - Batch autonomous production See `AbletonMCP_AI/docs/API_REFERENCE_PRO.md` for complete documentation. 
@@ -545,9 +533,8 @@ All sprints saved to `AbletonMCP_AI/docs/sprint_N_description.md` ## Current Sprint Assignment -**Sprint 8 (Active):** MIDI Instrument Loading + BPM Integration -**Owner:** Qwen + Kimi -**Goal:** MIDI tracks sound without manual intervention -**Deadline:** TBD (user decides priority) +**Sprint 9 (Completed):** Score → Render Pipeline (Compose-then-Render) +**Goal:** 50+ songs generated and rendered autonomously via ai_loop.py +**Status:** ✅ Completed 2026-04-14 (Strict Session View Implementation) -**Next:** Sprint 9 (Max for Live or Arrangement Recording) +**Key Dev:** Refer to `docs/SYSTEM_SCORE_RENDER.md` for JSON schema and rendering logic.