chore: clean repo and ignore runtime artifacts

renato97
2026-03-29 12:57:49 -03:00
parent b92887836f
commit 5b804dbc8c
77 changed files with 28 additions and 26706 deletions

29
.gitignore vendored
View File

@@ -95,4 +95,31 @@ nul
*.sample_embeddings.json
# AbletonMCP_AI generated audio
AppData/
AppData/
# Local backups and archives
AbletonMCP_AI_BAK_*/
_archive/
# Ableton bundled controller content kept only on disk
Axiom_25_Classic/
Axiom_49_61_Classic/
BCF2000/
BCR2000/
KONTROL49/
MPD32/
MPK25/
MPK49/
MPK61/
MPK88/
Push/
Push2/
Roland_A_PRO/
microKONTROL/
# AbletonMCP_AI runtime state
AbletonMCP_AI/diversity_memory.json
AbletonMCP_AI/MCP_Server/scan_log.txt
AbletonMCP_AI/MCP_Server/*.log
AbletonMCP_AI/MCP_Server/health_check_result.json
*.bak

View File

@@ -1,26 +0,0 @@
{
"used_families": {
"acoustic": 8
},
"used_paths": {
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Dubdogz & Jude & Frank - ININNA TORA (Extended Version) [@danielcarmona_dj].mp3": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Clap 5.wav": 2,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 9.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 3.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Kick Hit 03.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 12.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Hat Hit 07.wav": 2,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 5.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Kick Hit 10.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 14.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Kit_01_OHH_A#_125.wav": 3,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Kick Hit 02.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Clap 8.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Closed Hat 6.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 4.wav": 1,
"C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 10.wav": 1
},
"generation_count": 7,
"last_updated": "2026-03-29T01:28:32.412286",
"version": "1.0"
}

View File

@@ -1,46 +0,0 @@
__pycache__/
*.py[cod]
*.pyo
.pytest_cache/
.mypy_cache/
.ruff_cache/
.venv/
venv/
.idea/
.vscode/
*.log
*.tmp
*.bak
*.asd
*.als
*.wav
*.aif
*.aiff
*.flac
*.ogg
*.mp3
exports/
render/
renders/
stems/
temp/
tmp/
/automation/telegram.local.json
/automation/wsl_runtime/
/automation/wsl.local.env
/automation/wsl/.env
/automation/runs/
/automation/.task_queue.tmp*
# Temp and debug files
*_errors*.txt
*.patch
fix.py
update_opencode.py
grant_permissions.py
GLM_TASK_*_REPORT.md
glmwork.md
# Library paths (user-specific)
librerias/
sample/

View File

@@ -1,727 +0,0 @@
# AbletonMCP-AI Full Handoff
This file is the broadest handoff in the repo.
If another AI needs to retake the project with minimal context loss, this is the file to read first.
Project root:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI`
Current local branch:
- `main`
Last pushed commit at the moment this file was updated:
- `2a0d2f3dbf5f89b18690fee2a2659957f81b8191`
## Read Order
Read in this order:
1. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\CLAUDE.md`
2. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\README.md`
3. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\AI_HANDOFF.md`
4. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\ARCHITECTURE.md`
5. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\MCP_TOOLS.md`
6. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\REMOTE_PROTOCOL.md`
7. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\PROJECT_CONTEXT.md`
8. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\GPU_DIRECTML.md`
Useful secondary docs:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\ABLETUNES_TEMPLATE_NOTES.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\SAMPLE_SYSTEM_README.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ROADMAP_MASTER_GLM.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\README.md`
## What This Project Is
AbletonMCP-AI is a hybrid system to control Ableton Live 12 from MCP, generate long editable arrangements, analyze reference tracks, retrieve similar material from a local sample library, and build original projects that feel closer to a real producer workflow than to a loop toy.
The desired output is:
- always Arrangement View
- editable tracks and clips
- many specialized roles
- buses and returns
- original output, not stems from the reference track
## Core Design Rules That Must Not Be Lost
- Arrangement-first is mandatory.
- Reference audio is for analysis, not plagiarism.
- Do not use stems from the target song in the final output.
- The system must keep working even if the M4L path is incomplete.
- Stable fallback is better than a flashy broken feature.
- The local sample library is the primary sound source.
- Validation and diagnosis matter because the stack is large and brittle.
## Important Paths
Main runtime:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\__init__.py`
MCP server:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\server.py`
Music generator:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\song_generator.py`
Reference analysis:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\reference_listener.py`
Sample selection:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\sample_selector.py`
Audio resampling:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\audio_resampler.py`
Socket smoke test:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\socket_smoke_test.py`
Segment RAG builder:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\segment_rag_builder.py`
Local library:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks`
Reference folder:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\sample`
Ableton log:
- `C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\Log.txt`
Recovery popup file:
- `C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\CrashRecoveryInfo.cfg`
User library:
- `C:\Users\ren\Documents\Ableton\User Library`
## External Assets And References Used During Development
Reference tracks that shaped the direction:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\sample\Eli Brown x GeezLy - Me Gusta.mp3`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\sample\Mr. Pauer, Goyo - Quimica (Video Oficial).mp3`
Producer template reference pack:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\Abletunes_Free_Templates_Pack`
The reference tracks were used for:
- BPM and key estimation
- section and energy direction
- sample retrieval guidance
- stylistic remake goals
They were not supposed to be used as final stems.
## The Story From The Beginning Until Now
### Phase 0: Initial Goal
The original goal was not only to make sounds in Ableton, but to give the system the ability to:
- receive a prompt or a reference MP3
- understand the style and structure
- choose similar sounds from the local library
- generate a long arrangement that feels professionally produced
- keep the result editable inside Ableton
Very early it became clear that a plain Session View loop machine was not enough.
### Phase 1: Make The Remote Script Actually Work
The first major work was stabilizing the remote layer between MCP and Ableton.
Main problems solved in that phase:
- command naming mismatches between MCP and the Remote Script
- parameter normalization like `track_index`, `clip_index`, `scene_index`
- note writing API mismatch in Live
- socket protocol mismatch
- stale or broken sample manager initialization
This phase made the project usable enough to:
- create tracks
- create clips
- write MIDI
- query session state
- build the first generated projects
### Phase 2: Arrangement-First Pivot
At that point the output still behaved too much like Session clips and loops. The user explicitly wanted to see everything in Arrangement View.
That created the second major architectural pivot:
1. generate blueprint in Session
2. commit Session to Arrangement in ordered scene playback
3. place audio fallback and overlays in Arrangement
This became the stable route.
Important lesson:
- precreating certain audio tracks before the Session to Arrangement commit produced silent or broken sets
- the stable route is still: Session blueprint first, Arrangement commit second, audio layers after that
### Phase 3: Richer Project Shape
Once Arrangement-first worked, the next problem was musical complexity. The output felt like repeated loops.
The generator was expanded with:
- more track roles
- section-aware pattern generation
- richer drums, bass, melodic and FX layers
- scene naming and locators
- guide tracks
- more realistic arrangement structures
Important roles that became standard:
- kick
- clap
- snare fill
- hat closed
- hat open
- top loop
- percussion
- ride
- tom fill
- sub bass
- bass
- drone
- chords
- pluck
- vocal chop
- stab
- pad
- arp
- lead
- counter
- crash
- reverse FX
- riser FX
- impact FX
- atmos
- plus many `AUDIO ...` tracks
### Phase 4: Local Library As Primary Source
The next major issue was weak selection quality. At one point the code was pointed to a small mirror path and not to the real large library.
The real usable library was:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks`
That directory had hundreds of real audio assets, while the old fallback mirror only had a few dozen.
Fixing that changed the quality of:
- drum matching
- bass loop matching
- vocal loop matching
- FX and atmos selection
This was a major turning point because the generated output stopped sounding starved.
### Phase 5: Reference-Led Reconstruction
After the library path was fixed, the project started using references more seriously.
The target workflow became:
- analyze the reference
- infer BPM, key, energy, style and section behavior
- find similar sounds in the local library
- reconstruct a new original track
There was a temporary detour where stems of a reference song were materialized for analysis, but that was explicitly rejected because the desired product is not a stem-based copy workflow.
The rule became strict:
- no final stems from the reference
- only original reconstruction using local assets and generated MIDI
### Phase 6: Buses, Returns, Master, And Production Logic
The project then moved from "long loop" to "production-shaped session".
Major additions:
- real bus tracks
- return tracks
- track routing into buses
- role-based mixing
- return snapshots by section
- master chain blueprint
Typical bus layout now:
- drums
- bass
- music
- vocal
- FX
Typical return layout now:
- space
- echo
- heat
- glue
This phase made the result feel more like a produced project and less like independent loops.
### Phase 7: Template Analysis
The project analyzed professional Abletunes templates to absorb real producer patterns.
Main conclusions:
- professional templates are Arrangement-heavy
- there are many specialized layers, not just one drum loop and one bass loop
- sidechain triggers, transitions, buses, returns and printed audio are common
- arrangement blocks are often 16, 32 or 64 bars
- heavy automation is normal
This analysis informed later changes in:
- section shape
- layer count
- transition behavior
- bus logic
### Phase 8: Audio Fallback And Hybrid Output
The system learned to combine:
- MIDI and stock instruments
- local audio loops and hits
- special `AUDIO ...` overlays
This was critical because:
- sometimes stock-device generation gives editability and musical logic
- sometimes local audio assets give the genre-specific realism that MIDI alone cannot deliver
So the stack became hybrid by design, not by accident.
### Phase 9: Audio Resampling And Derived FX
Then came the derived transition layer.
The system added support for:
- reverse FX
- riser
- downlifter
- stutter
This area improved through:
- `audio_resampler.py`
- reference-driven placement
- later bugfixes for short clips and defensive rendering
One real bug that had to be solved:
- `AUDIO RESAMPLE STUTTER` failed until the resampler and short-clip FFT handling were hardened
That was fixed later and validated in real Live runs.
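For orientation, the simplest derived layer (a reverse FX) reduces to a short render like the sketch below. It uses `soundfile` and `numpy` (both listed as optional dependencies elsewhere in this repo), is not the actual `audio_resampler.py` code, and the file names are placeholders:
```python
# Minimal reverse-FX sketch: flip a one-shot in time and fade the new
# start so the reversed tail does not click. Paths are placeholders.
import numpy as np
import soundfile as sf

def render_reverse_fx(src_path, dst_path, fade_seconds=0.05):
    data, sr = sf.read(src_path, always_2d=True)  # shape: (frames, channels)
    reversed_audio = data[::-1].copy()            # reverse in time
    fade_len = max(1, int(sr * fade_seconds))
    reversed_audio[:fade_len] *= np.linspace(0.0, 1.0, fade_len)[:, None]
    sf.write(dst_path, reversed_audio, sr)

render_reverse_fx("crash.wav", "crash_reversed.wav")
```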
### Phase 10: GPU And Deeper Reference Listening
The user specifically asked to use GPU if possible.
The system moved toward:
- `DirectML`
- more expensive analysis of the reference
- segmented analysis at multiple window sizes
- heavier similarity scoring
This became one of the most important changes for retrieval quality.
The reference listener started doing:
- segmenting the reference into short windows
- scoring by role
- deeper reranking
- using local metadata and cache
This was the start of an audio-retrieval-style workflow rather than simple filename matching.
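The coarse pass is conceptually simple; here is a hedged sketch of fixed-window segmentation with per-window energy. The window size is illustrative and this is not the listener's real feature set:
```python
# Split a reference into fixed windows and measure RMS energy per window.
import librosa
import numpy as np

def segment_energy(path, window_seconds=2.0):
    y, sr = librosa.load(path, sr=None, mono=True)
    win = int(sr * window_seconds)
    segments = []
    for start in range(0, len(y) - win + 1, win):
        chunk = y[start:start + win]
        segments.append((start / sr, float(np.sqrt(np.mean(chunk ** 2)))))
    return segments  # list of (start_time_seconds, rms)
```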
### Phase 11: Segment RAG Direction
To go further, the project started building a segment-level retrieval cache for the local library.
That work added:
- persistent per-segment cache files
- a segment builder CLI
- partial index expansion over the real library
This is not a text RAG in the usual sense. It is closer to:
- segmented audio retrieval
- coarse search plus rerank
- role-aware filtering
- diversity constraints
This remains one of the most promising long-term directions for better remake quality.
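The retrieval shape this points at is "coarse score, then rerank with constraints". A sketch of the rerank step with a usage-count penalty, in the spirit of the `used_paths` counters in `diversity_memory.json` (the weight and field names are illustrative):
```python
# Rerank coarse candidates, penalizing files the selector already used.
def rerank(candidates, used_paths, penalty=0.15, top_k=5):
    """candidates: [(path, coarse_score)]; used_paths: {path: times_used}."""
    rescored = [
        (path, score - penalty * used_paths.get(path, 0))
        for path, score in candidates
    ]
    rescored.sort(key=lambda item: item[1], reverse=True)
    return rescored[:top_k]
```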
### Phase 12: GLM Workflow
At some point token efficiency became a concern, so GLM-5 was introduced as a worker model.
The workflow that proved useful was:
1. Codex writes a narrow `.md` task
2. GLM edits only 1 to 3 related files
3. Codex reviews the diff
4. Codex corrects technical mistakes and validates the runtime
What GLM was good at:
- narrow feature implementation
- heuristic expansion
- helper tools
- reports and manifests
Where GLM fell short:
- declaring work complete too early
- runtime-sensitive changes without supervision
- architectural judgment
- keeping diffs small
Practical verdict:
- useful as a worker
- not reliable enough as the sole closer of large features
### Phase 13: Temporary WSL / n8n / Local Gitea Automation Detour
There was a detour into WSL orchestration, n8n, local Gitea and Telegram loops.
Scaffolding was generated for:
- WSL deployment
- Docker Compose
- n8n flows
- Telegram notifications
- GLM to Codex automation loops
That stack produced a lot of files under:
- `automation\`
- `automation\wsl\`
But the key lesson was:
- the deployment summary was overstated
- the generated stack was not a truly finished deployment
- the main product value still lives in the music system, not in orchestration
Because of that, the project intentionally pivoted back to the flow that worked:
- Codex writes focused `.md`
- GLM does bounded work
- Codex reviews and fixes
### Phase 14: Retrieval Hardening, Pattern Banks, Transition Materialization
After the orchestration detour, the roadmap was re-centered on the actual product.
Task batches improved:
- role-safe retrieval
- repetition penalties
- more section pattern banks
- transition event materialization
This reduced some of the "same loop again" feeling, but did not solve everything.
### Phase 15: Device Automation Snapshots
The latest work pushed into:
- track device parameter snapshots
- return device parameter snapshots
- master section snapshots
- automation summaries
GLM implemented part of that, but the raw result was not correct.
The real fixes applied afterward were:
- use the already existing `SECTION_DEVICE_AUTOMATION` instead of duplicate tables
- use the already existing `MASTER_DEVICE_AUTOMATION`
- flatten `device_parameters` into the format the server actually applies
- make the server accept both flat and nested payloads defensively (sketched after the file list below)
- add `mix_automation_summary`
- add `mix_automation_warnings`
This work lives mainly in:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\song_generator.py`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\server.py`
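A minimal sketch of that defensive normalization, assuming an illustrative `"Device/Parameter"` key convention rather than the server's exact schema:
```python
# Hedged sketch: accept device parameters either flat ({"Eq8/Freq": 0.4})
# or nested ({"Eq8": {"Freq": 0.4}}) and always hand a flat map onward.
# The "/" separator and key names are assumptions for illustration.
def flatten_device_parameters(payload: dict) -> dict:
    flat = {}
    for key, value in payload.items():
        if isinstance(value, dict):
            for param, param_value in value.items():
                flat[f"{key}/{param}"] = param_value
        else:
            flat[key] = value
    return flat
```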
### Phase 16: Fix for "Only Piano" Issue (Audio Samples Not Loading)
The user reported (originally in Spanish): "what you generated only has a few pianos, none of the sounds from my library!" - the generated tracks only had piano/MIDI sounds, no actual audio samples from the local library.
Root causes found and fixed:
1. **`ROLE_PATTERNS` used non-recursive globs** in `reference_listener.py` lines 1228-1254:
- Patterns like `'*Kick*.wav'` couldn't match files in subdirectories like `loops/kick/`
- Fixed by changing to recursive patterns: `'**/*Kick*.wav'` (illustrated in the sketch below)
2. **Method resolution bug** in `reference_listener.py`:
- `_get_role_section_features` and `_section_character_bonus` are methods in `SectionDetector` class
- Were being called as `self._method` from `ReferenceAudioListener` instead of `self._section_detector._method`
- Fixed at lines 3247 and 3270-3272
After fixes, verification showed:
- Reference audio plan builds correctly with 30+ layers
- `_materialize_reference_audio_layers()` creates actual audio tracks
- Real samples from local library are used (e.g., "mt kick hit 10.wav", "bbh - primer impacto - bass loop 03 g#m.wav")
- 34 audio tracks created (8 derived + 26 base), 0 errors
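To make the first root cause concrete, this is the difference the recursive pattern makes; a runnable illustration with a placeholder library root:
```python
# Non-recursive globs only see the top level; "**" with recursive=True
# also descends into subfolders like loops/kick/. Root path is a placeholder.
import glob
import os

library = r"C:\samples"
flat = glob.glob(os.path.join(library, "*Kick*.wav"))
deep = glob.glob(os.path.join(library, "**", "*Kick*.wav"), recursive=True)
print(f"top-level matches: {len(flat)}, recursive matches: {len(deep)}")
```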
## What Currently Works
The system can currently:
- generate full projects in Arrangement View
- build MIDI plus stock-device layers
- build many `AUDIO ...` layers from the local library
- analyze a reference track
- build a retrieval-guided original result
- commit scenes to Arrangement
- create buses and returns
- route tracks into buses
- apply track, return and master snapshots
- diagnose the generated set
- validate the generated set
- use DirectML for deeper matching work
## What Is Stable
The most stable route today is:
1. analyze reference if one is available
2. build config in `song_generator.py`
3. materialize Session blueprint through the runtime
4. commit Session to Arrangement
5. place audio layers in Arrangement
6. validate and diagnose
Do not casually change this order.
## What Is Still Weak
The project still has real weaknesses:
- some generations still feel too loop-based
- retrieval can still pick poor family matches
- remake quality is not yet close enough to a convincing stylistic remake
- some runs still overuse familiar sound families
- loudness and gain staging can vary too much between runs
- derived resample layers are not always present in the final set
- the MCP wrapper for `generate_track` can time out even when Live keeps working
## Known Operational Problems
### 1. Recovery popup can block everything
If Live crashes or thinks recovery is needed, a modal popup can block the socket.
The file involved is:
- `C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\CrashRecoveryInfo.cfg`
Practical fix used during development (scripted below):
- kill Ableton
- blank that file
- restart Ableton
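The same fix as a small script; a sketch only, and the process name `"Ableton Live 12 Suite.exe"` is an assumption to verify on your machine:
```python
# Kill Live, blank the crash-recovery marker, then restart Live by hand.
# Process name and cfg path are assumptions from this machine's setup.
import subprocess
from pathlib import Path

CFG = Path(r"C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\CrashRecoveryInfo.cfg")

subprocess.run(["taskkill", "/IM", "Ableton Live 12 Suite.exe", "/F"], check=False)
CFG.write_text("", encoding="utf-8")
```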
### 2. MCP wrapper timeout
The MCP tool wrapper around `generate_track` often times out at about 120 seconds.
Important:
- timeout does not always mean generation failed
- often the set keeps building inside Live
Safer checks after a timeout (see the probe sketch below):
- `get_session_info()`
- `get_tracks()`
- `validate_set()`
- `diagnose_generated_set()`
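Because a wrapper timeout is not proof of failure, a direct probe over the same JSON-over-TCP socket the Remote Script serves on port 9877 (the protocol used by the agent scripts removed in this commit) is the safest follow-up; a minimal sketch:
```python
# Send one newline-terminated JSON command and read one JSON line back.
import json
import socket

def probe(command_type, params=None, timeout=15.0):
    payload = json.dumps({"type": command_type, "params": params or {}}).encode("utf-8") + b"\n"
    with socket.create_connection(("127.0.0.1", 9877), timeout=timeout) as sock:
        sock.sendall(payload)
        line = sock.makefile("r", encoding="utf-8").readline()
    return json.loads(line)

print(probe("get_session_info"))
print(probe("get_tracks"))
```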
### 3. Runtime state can drift from stored manifest
At least once, `get_generation_manifest()` returned stale data from an older generation while the actual current set in Live was already different.
Trust runtime state first:
- session info
- track list
- diagnosis
- validation
Manifest is useful, but not always the freshest source.
### 4. GLM reports often exaggerate completion
Never trust a GLM report by itself.
Always compare:
- report
- actual diff
- runtime result
## Current Validation Habit
Minimum technical checks after code edits:
```powershell
python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\audio_resampler.py"
python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\reference_listener.py"
python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\sample_selector.py"
python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\server.py"
python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\song_generator.py"
python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\__init__.py"
```
Minimum Live checks:
- `get_session_info()`
- `get_tracks()`
- `validate_set(check_clips=True, check_gain=True, check_routing=True)`
- `diagnose_generated_set()`
Useful direct smoke test:
```powershell
cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server"
python socket_smoke_test.py
```
## Current Roadmap Priority
The active roadmap is:
1. better library retrieval
2. more real section variation
3. better transition and derived FX placement
4. better device automation
5. gain staging and loudness consistency
6. better remake ability
7. stronger QA and export
The roadmap source file is:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ROADMAP_MASTER_GLM.md`
## Current GLM Workflow
Keep GLM on short, bounded tasks only.
Recent task files:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_001_RETRIEVAL_ROLE_PENALTIES.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_002_SECTION_PATTERN_BANK_EXPANSION.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_003_TRANSITION_EVENT_MATERIALIZATION.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_004_DEVICE_AUTOMATION_SNAPSHOTS.md`
Corresponding reports:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_001_REPORT.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_002_REPORT.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_003_REPORT.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_004_REPORT.md`
Recommended pattern:
1. Codex writes one narrow task md
2. GLM edits only 1 to 3 files
3. Codex reviews diff
4. Codex corrects technical and runtime mistakes
5. only then decide whether the task is really done
## Current Documentation Map
Core repo docs:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\README.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\CLAUDE.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\AI_HANDOFF.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\ARCHITECTURE.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\MCP_TOOLS.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\REMOTE_PROTOCOL.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\PROJECT_CONTEXT.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\GPU_DIRECTML.md`
Generator and retrieval docs:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\ABLETUNES_TEMPLATE_NOTES.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\SAMPLE_SYSTEM_README.md`
Roadmaps and backlog:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\roadmap.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\roadmap2.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ROADMAP_MASTER_GLM.md`
Automation detour docs:
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\README.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\MASTER_AUTONOMOUS_ROADMAP.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\wsl\README.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\wsl\DEPLOYMENT_SUMMARY.md`
- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\WSL_STACK.md`
## If You Need To Resume Development Safely
Start here:
1. read this file
2. read the current roadmap
3. inspect local git status
4. compile changed Python files
5. restart Live if `__init__.py` changed
6. clear recovery popup if needed
7. validate with `get_session_info`, `get_tracks`, `validate_set`, `diagnose_generated_set`
8. only then touch generation logic
## Current Honest Product Status
This project is no longer a basic prototype.
It is already an advanced Ableton generation system that can:
- create long arrangements
- use references intelligently
- retrieve from a large local library
- mix MIDI and audio material
- build buses, returns and snapshots
- generate editable results in Arrangement View
But it is still not finished.
The main gap is no longer raw plumbing. The main gap is artistic fidelity:
- better retrieval
- better variation
- better remake quality
- more consistent mix quality
That is the real work that remains.

View File

@@ -1,39 +0,0 @@
# Abletunes Template Notes
These templates show clear patterns of real production work that the generator should copy.
## Strong patterns
- They are `arrangement-first`, not `session-first`. In all four sets the clips live almost entirely in Arrangement and the scenes are empty or serve no productive role.
- All of them use locators for sections (`Intro`, `Breakdown`, `Drop`, `Break`, `Outro`, `End`), and those sections almost always fall on blocks of `16`, `32`, `64`, `96` or `128` beats.
- There is always a group hierarchy: drums/top drums, bass, instruments, vox, fx.
- There is almost always an `SC Trigger` track (or equivalent) dedicated to sidechain.
- Drums are never a single track. There are separate layers for kick, clap, snare, hats, ride, perc, fills, crashes, risers and FX.
- Harmonic parts are not a single track either. Distinct layers appear for bassline, reese/sub, chord, piano, string, pluck, lead and layers.
- They mix MIDI and audio aggressively. A producer does not stay MIDI-only: loops get printed, resampled, frozen and processed as audio when needed.
- There is substantial per-track treatment: `Eq8`, `Compressor2`, `Reverb`, `AutoFilter`, `PingPongDelay`, `GlueCompressor`, `MultibandDynamics`, `Limiter`, `Saturator`.
## What matters most for the MCP
- The generator must not create "one long loop". It has to create sections with clear mutations from one to the next.
- Each section needs density variation, not just basic mute/unmute. The templates drop fills, crashes, reverse FX, chants, top loops and extra layers only at tension points.
- A professional arrangement uses more specialized tracks than the MCP generates today. Role separation is part of the sound.
- More original audio derived from the project itself should be printed: resamples, reverses, freezes and FX built from the project's own material.
- Returns are few but concrete. There is no need to flood the set with sends; what is needed is `reverb`, `delay` and well-used group buses.
## Concrete signals seen in the pack
- `Abletunes - Dope As F_ck`: `128 BPM`, 6 groups, 2 returns, `Sylenth1` dominant, heavy automation (`8121` events).
- `Abletunes - Freedom`: `126 BPM`, simpler house mix, very separated drums, less automation, lots of `OriginalSimpler` + `Serum`.
- `Abletunes - Hideout`: long, dense set, `Massive` + `Sylenth1`, a huge drum section and heavy automation (`6470` events).
- `Abletunes - Nobody's Watching`: more stock-oriented, uses `Operator`, `Simpler`, plenty of printed vocal audio and FX.
## Rules we should adopt
- Generate in Arrangement by default, with real locators and 16/32-bar sections.
- Add `SC Trigger`, groups and fixed returns from the blueprint.
- Split drums into more roles: kick, clap main, clap layer, snare fill, hats, ride, perc main, perc FX, crash, reverse, riser.
- Split harmony and hooks: sub, bassline, chord stab, piano/keys, string/pad, pluck, lead, accent synth.
- Create per-section transition events: uplifter, downlifter, reverse crash, vocal chop, tom fill.
- Print audio derived from the generated material when a layer needs more impact or texture.
- Add per-section automation on filters, sends, group volumes and transition FX.

View File

@@ -1,203 +0,0 @@
# Sample Management System - AbletonMCP-AI
Complete system for indexing, classifying and intelligently selecting music samples.
## Components
### 1. `audio_analyzer.py` - Audio Analysis
Automatically detects characteristics of audio files:
- **BPM**: Tempo detection via onset analysis
- **Key**: Key detection via chromagram
- **Type**: Classification as kick, snare, bass, synth, etc.
- **Spectral features**: Centroid, rolloff, RMS
**Basic usage:**
```python
from audio_analyzer import analyze_sample

result = analyze_sample("path/to/sample.wav")
print(f"BPM: {result['bpm']}, Key: {result['key']}")
print(f"Type: {result['sample_type']}")
```
**Backends:**
- `librosa`: full analysis (requires installation)
- `basic`: filename-based analysis (no dependencies)
### 2. `sample_manager.py` - Library Management
Complete manager for the sample library:
- Recursive directory indexing
- Automatic classification into categories
- Extensible metadata (tags, rating, genres)
- Advanced search with multiple filters
- JSON persistence
**Main categories:**
- `drums`: kick, snare, clap, hat, perc, shaker, tom, cymbal
- `bass`: sub, bassline, acid
- `synths`: lead, pad, pluck, chord, fx
- `vocals`: vocal, speech, chant
- `loops`: drum_loop, bass_loop, synth_loop, full_loop
- `one_shots`: hit, noise
**Basic usage:**
```python
from sample_manager import SampleManager

# Initialize
manager = SampleManager(r"C:\Users\ren\embeddings\all_tracks")
# Scan
stats = manager.scan_directory(analyze_audio=True)
# Search
kicks = manager.search(sample_type="kick", key="Am", bpm=128)
house_samples = manager.search(genres=["house"], limit=10)
# Get a complete pack
pack = manager.get_pack_for_genre("techno", key="F#m", bpm=130)
```
### 3. `sample_selector.py` - Intelligent Selection
Context-aware selection based on genre, key and BPM:
- Predefined genre profiles
- Harmonic matching between samples
- Generation of coherent drum kits
- Automatic MIDI mapping
**Supported genres:**
- Techno (industrial, minimal, acid)
- House (deep, classic, progressive)
- Tech-House
- Trance (progressive, psy)
- Drum & Bass (liquid, neuro)
- Ambient
**Basic usage:**
```python
from sample_selector import SampleSelector

selector = SampleSelector()
# Select for a genre
group = selector.select_for_genre("techno", key="F#m", bpm=130)
# Access elements
group.drums.kick  # Kick sample
group.bass        # List of bass samples
group.synths      # List of synths
# MIDI mapping
mapping = selector.get_midi_mapping_for_kit(group.drums)
# Harmonic key change
new_key = selector.suggest_key_change("Am", "fifth_up")  # Em
```
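The interval math behind that last call is plain pitch-class arithmetic; a sketch, not the library's implementation (enharmonic spelling simplified):
```python
# "fifth_up" moves the root up 7 semitones and keeps the minor quality.
NOTES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]

def fifth_up(key):
    minor = key.endswith("m")
    root = key[:-1] if minor else key
    return NOTES[(NOTES.index(root) + 7) % 12] + ("m" if minor else "")

print(fifth_up("Am"))  # Em
```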
## MCP Server Integration
The MCP server exposes the following tools:
### Library Management
- `scan_sample_library` - Scan a sample directory
- `get_sample_library_stats` - Library statistics
### Search and Selection
- `advanced_search_samples` - Search with multiple filters
- `select_samples_for_genre` - Automatic selection by genre
- `get_drum_kit_mapping` - Drum kit with MIDI mapping
- `get_sample_pack_for_project` - Complete pack for a project
### Analysis and Compatibility
- `analyze_audio_file` - Analyze an audio file
- `find_compatible_samples` - Find compatible samples
- `suggest_key_change` - Suggest key changes
## Data Structures
### Sample
```python
@dataclass
class Sample:
    id: str                 # Unique ID
    name: str               # File name
    path: str               # Full path
    category: str           # Main category
    subcategory: str        # Subcategory
    sample_type: str        # Specific type
    key: Optional[str]      # Key (Am, F#m, C)
    bpm: Optional[float]    # BPM
    duration: float         # Duration in seconds
    genres: List[str]       # Associated genres
    tags: List[str]         # Tags
    rating: int             # Rating 0-5
```
### DrumKit
```python
@dataclass
class DrumKit:
    name: str
    kick: Optional[Sample]
    snare: Optional[Sample]
    clap: Optional[Sample]
    hat_closed: Optional[Sample]
    hat_open: Optional[Sample]
    perc1: Optional[Sample]
    perc2: Optional[Sample]
```
## MIDI Mapping
Standard notes for drums (collected in the dict below):
- `36` (C1): Kick
- `38` (D1): Snare
- `39` (D#1): Clap
- `42` (F#1): Closed Hat
- `46` (A#1): Open Hat
- `41` (F1): Tom Low
- `49` (C#2): Crash
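The same table as a ready-to-use mapping (values taken directly from the list above):
```python
# Standard drum note numbers used by the kit mapping.
DRUM_NOTE_MAP = {
    "kick": 36,        # C1
    "snare": 38,       # D1
    "clap": 39,        # D#1
    "hat_closed": 42,  # F#1
    "hat_open": 46,    # A#1
    "tom_low": 41,     # F1
    "crash": 49,       # C#2
}
```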
## Usage Examples
### Build a complete track
```python
# Select samples for techno
selector = get_selector()
group = selector.select_for_genre("techno", key="F#m", bpm=130)
# Use with Ableton
ableton = get_ableton_connection()
# Create tracks and load samples
for i, sample in enumerate([group.drums.kick, group.drums.snare]):
    if sample:
        print(f"Load {sample.name} on track {i}")
```
### Find compatible samples
```python
# Find samples that work with a given kick
kick = manager.get_by_path("path/to/kick.wav")
compatible = selector.find_compatible_samples(kick, max_results=5)
for sample, score in compatible:
    print(f"{sample.name}: {score:.1%} compatible")
```
## Generated Files
- `.sample_cache/sample_library.json` - Library index
- `.sample_cache/library_stats.json` - Statistics
## Optional Dependencies
For full audio analysis:
```bash
pip install librosa soundfile numpy
```
Without these dependencies the system runs in "basic" mode, using metadata derived from file names (backend selection sketched below).
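A hedged sketch of that fallback, with illustrative function internals (the analyzer's real code may differ):
```python
# Prefer librosa when installed; otherwise fall back to filename metadata.
try:
    import librosa
    BACKEND = "librosa"
except ImportError:
    BACKEND = "basic"

def detect_bpm(path):
    if BACKEND == "librosa":
        y, sr = librosa.load(path, sr=None, mono=True)
        tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
        return float(tempo)
    # basic mode: trust a trailing "_125"-style BPM token if present
    token = path.rsplit(".", 1)[0].split("_")[-1]
    return float(token) if token.isdigit() else None
```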

View File

@@ -1,26 +0,0 @@
"""
MCP Server para AbletonMCP-AI
Servidor FastMCP que conecta Claude con Ableton Live 12
"""
from .server import mcp, main
from .song_generator import SongGenerator
from .sample_index import SampleIndex
# Nuevo sistema de samples
try:
SAMPLE_SYSTEM_AVAILABLE = True
except ImportError:
SAMPLE_SYSTEM_AVAILABLE = False
__all__ = [
'mcp', 'main',
'SongGenerator', 'SampleIndex',
]
if SAMPLE_SYSTEM_AVAILABLE:
__all__.extend([
'SampleManager', 'Sample', 'get_manager',
'SampleSelector', 'get_selector', 'DrumKit', 'InstrumentGroup',
'AudioAnalyzer', 'analyze_sample', 'SampleType',
])

View File

@@ -1,318 +0,0 @@
import json
import socket
from datetime import datetime
import os

LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent11_review_harmony.txt"

CHORD_TONES = {
    "Am": [57, 60, 64],
    "F": [53, 57, 60],
    "C": [48, 52, 55],
    "G": [43, 47, 50]
}
CHORD_NAMES = {
    "Am": ["A", "C", "E"],
    "F": ["F", "A", "C"],
    "C": ["C", "E", "G"],
    "G": ["G", "B", "D"]
}
AM_SCALE = [57, 59, 60, 62, 64, 65, 67]
PROGRESSION_ORDER = ["Am", "F", "C", "G"]
CHORD_DURATION = 8.0


def pitch_to_name(pitch):
    names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    return names[pitch % 12]


def get_chord_at_time(start_time):
    chord_index = int(start_time // CHORD_DURATION) % 4
    return PROGRESSION_ORDER[chord_index]


def normalize_to_octave(pitch, target_octave=3):
    return (pitch % 12) + (target_octave * 12)


class AbletonSocketClient:
    def __init__(self, host="127.0.0.1", port=9877, timeout=15.0):
        self.host = host
        self.port = port
        self.timeout = timeout

    def send(self, command_type, params=None):
        payload = json.dumps({
            "type": command_type,
            "params": params or {},
        }).encode("utf-8") + b"\n"
        with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
            sock.sendall(payload)
            reader = sock.makefile("r", encoding="utf-8")
            try:
                line = reader.readline()
            finally:
                reader.close()
            try:
                sock.shutdown(socket.SHUT_RDWR)
            except OSError:
                pass
        if not line:
            raise RuntimeError(f"No response for command: {command_type}")
        return json.loads(line)


def log_message(msg):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    log_line = f"[{timestamp}] {msg}\n"
    print(log_line.strip())
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(log_line)


def analyze_track_harmony(client, track_index, track_name, scene_index=0):
    issues = []
    notes_in_key = 0
    notes_out_of_key = 0
    chord_matches = 0
    chord_mismatches = 0
    try:
        response = client.send("get_notes", {
            "track_index": track_index,
            "scene_index": scene_index
        })
        if response.get("status") != "success":
            return {"error": response.get("message", "Unknown error")}
        notes = response.get("result", {}).get("notes", [])
        if not notes:
            return {"warning": "No notes found in clip"}
        for note in notes:
            pitch = note.get("pitch", 60)
            start = note.get("start", 0)
            duration = note.get("duration", 1)
            pitch_class = pitch % 12
            current_chord = get_chord_at_time(start)
            in_am_scale = any((pitch % 12) == (p % 12) for p in AM_SCALE)
            if in_am_scale:
                notes_in_key += 1
            else:
                notes_out_of_key += 1
                issues.append({
                    "type": "out_of_key",
                    "pitch": pitch,
                    "pitch_name": pitch_to_name(pitch),
                    "start": start,
                    "expected": "Am scale (A, B, C, D, E, F, G)"
                })
            chord_tones_normalized = [t % 12 for t in CHORD_TONES[current_chord]]
            if pitch_class in chord_tones_normalized:
                chord_matches += 1
            else:
                chord_mismatches += 1
                chord_tone_names = CHORD_NAMES[current_chord]
                issues.append({
                    "type": "chord_tone_mismatch",
                    "pitch": pitch,
                    "pitch_name": pitch_to_name(pitch),
                    "start": start,
                    "chord": current_chord,
                    "expected_chord_tones": chord_tone_names
                })
        return {
            "total_notes": len(notes),
            "notes_in_key": notes_in_key,
            "notes_out_of_key": notes_out_of_key,
            "chord_matches": chord_matches,
            "chord_mismatches": chord_mismatches,
            "issues": issues
        }
    except Exception as e:
        return {"error": str(e)}


def analyze_bass_notes(client, track_index, scene_index=0):
    issues = []
    correct_roots = 0
    incorrect_roots = 0
    try:
        response = client.send("get_notes", {
            "track_index": track_index,
            "scene_index": scene_index
        })
        if response.get("status") != "success":
            return {"error": response.get("message", "Unknown error")}
        notes = response.get("result", {}).get("notes", [])
        if not notes:
            return {"warning": "No bass notes found"}
        ROOT_NOTES = {
            "Am": 57,
            "F": 53,
            "C": 48,
            "G": 43
        }
        for note in notes:
            pitch = note.get("pitch", 60)
            start = note.get("start", 0)
            current_chord = get_chord_at_time(start)
            expected_root = ROOT_NOTES[current_chord]
            expected_root_class = expected_root % 12
            pitch_class = pitch % 12
            if pitch_class == expected_root_class:
                correct_roots += 1
            else:
                incorrect_roots += 1
                if start % 4.0 < 0.5:
                    issues.append({
                        "type": "wrong_bass_root",
                        "pitch": pitch,
                        "pitch_name": pitch_to_name(pitch),
                        "start": start,
                        "chord": current_chord,
                        "expected_root": pitch_to_name(expected_root)
                    })
        return {
            "total_notes": len(notes),
            "correct_roots": correct_roots,
            "incorrect_roots": incorrect_roots,
            "issues": issues
        }
    except Exception as e:
        return {"error": str(e)}


def main():
    os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
    log_message("=" * 60)
    log_message("AGENT 11 - HARMONIC COHERENCE REVIEW")
    log_message("=" * 60)
    log_message("Target progression: Am - F - C - G (8 beats each)")
    log_message("Am scale: A, B, C, D, E, F, G")
    log_message("")
    client = AbletonSocketClient()
    session = client.send("get_session_info")
    if session.get("status") != "success":
        log_message("ERROR: Cannot connect to Ableton session")
        return
    log_message(f"Session: {session.get('result', {}).get('num_tracks', 0)} tracks, "
                f"tempo: {session.get('result', {}).get('tempo', 120)} BPM")
    tracks_response = client.send("get_tracks")
    if tracks_response.get("status") != "success":
        log_message("ERROR: Cannot get tracks")
        return
    tracks = tracks_response.get("result", [])
    midi_tracks = [
        (i, t.get("name", "Unknown"), t.get("session_clip_count", 0))
        for i, t in enumerate(tracks)
        if t.get("has_midi_input") and t.get("session_clip_count", 0) > 0
    ]
    log_message(f"Found {len(midi_tracks)} MIDI tracks with clips")
    log_message("")
    total_issues = 0
    critical_issues = 0
    for track_index, track_name, clip_count in midi_tracks:
        log_message(f"\n--- TRACK {track_index}: {track_name} ---")
        if "BASS" in track_name.upper():
            log_message("Analyzing as BASS track (checking root notes)")
            result = analyze_bass_notes(client, track_index)
        else:
            log_message("Analyzing harmonic content")
            result = analyze_track_harmony(client, track_index, track_name)
        if "error" in result:
            log_message(f" ERROR: {result['error']}")
            continue
        if "warning" in result:
            log_message(f" WARNING: {result['warning']}")
            continue
        if "total_notes" in result:
            log_message(f" Total notes: {result['total_notes']}")
        if "notes_in_key" in result:
            log_message(f" Notes in Am scale: {result['notes_in_key']}/{result['total_notes']}")
            if result["notes_out_of_key"] > 0:
                log_message(f" OUT OF KEY: {result['notes_out_of_key']} notes")
                total_issues += result["notes_out_of_key"]
        if "chord_matches" in result:
            log_message(f" Chord tone matches: {result['chord_matches']}/{result['total_notes']}")
            if result["chord_mismatches"] > 0:
                log_message(f" CHORD MISMATCHES: {result['chord_mismatches']} notes")
        if "correct_roots" in result:
            log_message(f" Correct bass roots: {result['correct_roots']}/{result['total_notes']}")
            if result["incorrect_roots"] > 0:
                log_message(f" WRONG BASS ROOTS: {result['incorrect_roots']} notes")
                total_issues += result["incorrect_roots"]
                critical_issues += result["incorrect_roots"]
        if result.get("issues"):
            for issue in result["issues"][:5]:
                if issue["type"] == "out_of_key":
                    log_message(f" [ISSUE] Note {issue['pitch_name']}{issue['pitch']} at beat {issue['start']:.1f} "
                                f"not in Am scale")
                elif issue["type"] == "chord_tone_mismatch":
                    log_message(f" [ISSUE] Note {issue['pitch_name']}{issue['pitch']} at beat {issue['start']:.1f} "
                                f"not in chord {issue['chord']} (expected: {issue['expected_chord_tones']})")
                elif issue["type"] == "wrong_bass_root":
                    log_message(f" [CRITICAL] Bass note {issue['pitch_name']}{issue['pitch']} at beat {issue['start']:.1f} "
                                f"should be {issue['expected_root']} for chord {issue['chord']}")
    log_message("\n" + "=" * 60)
    log_message("HARMONIC COHERENCE SUMMARY")
    log_message("=" * 60)
    if critical_issues > 0:
        log_message("STATUS: CRITICAL ISSUES FOUND")
        log_message(f" - {critical_issues} critical bass root mismatches")
        log_message(f" - {total_issues} total harmonic issues")
        log_message("")
        log_message("RECOMMENDATION: Review bass notes and chord tones")
    elif total_issues > 0:
        log_message("STATUS: MINOR ISSUES FOUND")
        log_message(f" - {total_issues} notes out of Am scale")
        log_message("")
        log_message("RECOMMENDATION: May be intentional chromatic passing tones")
    else:
        log_message("STATUS: HARMONICALLY COHERENT")
        log_message(" - All notes in Am scale")
        log_message(" - Bass follows root progression A-F-C-G")
        log_message(" - Chord tones align with progression")
    log_message("")
    log_message("Agent 11 review complete.")


if __name__ == "__main__":
    main()

View File

@@ -1,192 +0,0 @@
"""
Agent 17 - Sample Loading Reviewer
Verifies audio tracks have samples loaded and loads samples if needed.
"""
import socket
import json
import os
import glob
from datetime import datetime
LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent17_review_samples.txt"
SAMPLE_LIBRARY = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks"
ORGANIZED_LIBRARY = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples"
HOST = "127.0.0.1"
PORT = 9877
def log(message):
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_line = f"[{timestamp}] {message}"
print(log_line)
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
with open(LOG_FILE, "a", encoding="utf-8") as f:
f.write(log_line + "\n")
def send_command(command_type, params=None):
if params is None:
params = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(30)
try:
sock.connect((HOST, PORT))
request = {"type": command_type, "params": params}
sock.sendall((json.dumps(request) + "\n").encode("utf-8"))
response = b""
while True:
chunk = sock.recv(4096)
if not chunk:
break
response += chunk
if b"\n" in response:
break
return json.loads(response.decode("utf-8").strip())
finally:
sock.close()
def find_samples(query, sample_type=None):
samples = []
search_paths = [ORGANIZED_LIBRARY, SAMPLE_LIBRARY]
for search_path in search_paths:
if not os.path.exists(search_path):
continue
pattern = f"**/*{query}*.wav"
for filepath in glob.glob(os.path.join(search_path, pattern), recursive=True):
if sample_type:
type_dir = os.path.join(search_path, sample_type)
if type_dir.lower() in filepath.lower():
samples.append(filepath)
else:
samples.append(filepath)
return samples[:15]
def load_samples_to_track(track_index, track_name, sample_type, positions):
samples = find_samples(sample_type)
if not samples:
log(f" No samples found for type: {sample_type}")
return 0
clips_loaded = 0
for i, sample_path in enumerate(samples):
if clips_loaded >= 10:
break
position = positions[i] if i < len(positions) else positions[-1] + (i - len(positions) + 1) * 4
try:
result = send_command("create_arrangement_audio_pattern", {
"track_index": track_index,
"file_path": sample_path,
"positions": [position],
"name": f"{track_name} Clip {i+1}"
})
if result.get("status") == "success":
clips_loaded += 1
log(f" Loaded: {os.path.basename(sample_path)} at position {position}")
else:
log(f" Failed: {result.get('message', 'Unknown error')}")
except Exception as e:
log(f" Error loading sample: {e}")
return clips_loaded
def main():
log("=" * 60)
log("Agent 17 - Sample Loading Reviewer Started")
log("=" * 60)
log("\n[1] Connecting to Ableton socket...")
try:
session = send_command("get_session_info", {})
if session.get("status") != "success":
log(f"ERROR: Failed to get session info: {session}")
return
log(f"Connected. Tempo: {session.get('result', {}).get('tempo', 'unknown')} BPM")
except Exception as e:
log(f"ERROR: Cannot connect to Ableton: {e}")
return
log("\n[2] Getting track list...")
try:
tracks_response = send_command("get_tracks", {})
if tracks_response.get("status") != "success":
log(f"ERROR: Failed to get tracks: {tracks_response}")
return
tracks = tracks_response.get("result", [])
log(f"Found {len(tracks)} tracks")
except Exception as e:
log(f"ERROR: Cannot get tracks: {e}")
return
log("\n[3] Analyzing audio tracks...")
audio_tracks_needing_samples = []
for track in tracks:
track_name = track.get("name", "")
track_index = track.get("index", -1)
has_audio = track.get("has_audio_input", False) and track.get("has_audio_output", False)
has_midi = track.get("has_midi_input", False)
arr_clips = track.get("arrangement_clip_count", 0)
if has_audio and not has_midi:
if arr_clips < 10:
audio_tracks_needing_samples.append({
"index": track_index,
"name": track_name,
"clips": arr_clips
})
log(f" Track {track_index}: '{track_name}' - {arr_clips} clips (NEEDS SAMPLES)")
else:
log(f" Track {track_index}: '{track_name}' - {arr_clips} clips (OK)")
if not audio_tracks_needing_samples:
log("\n[4] All audio tracks have sufficient samples!")
return
log(f"\n[4] {len(audio_tracks_needing_samples)} tracks need samples. Loading...")
track_type_map = {
"KICK": "kick",
"SNARE": "snare",
"HATS": "hat",
"HAT": "hat",
"BASS": "bass",
"LEAD": "synth",
"PAD": "atmos",
"ARP": "synth",
"PERC": "percussion",
"VOCAL": "vocal",
"RISER": "riser",
"CRASH": "crash",
"DOWNLIFTER": "fx",
"AUDIO": "synth"
}
positions = [0, 8, 16, 24, 32, 40, 48, 56, 64, 72]
for track_info in audio_tracks_needing_samples:
track_index = track_info["index"]
track_name = track_info["name"]
sample_type = "synth"
for key, stype in track_type_map.items():
if key in track_name.upper():
sample_type = stype
break
log(f"\n Loading {sample_type} samples into track {track_index} ('{track_name}')...")
clips_loaded = load_samples_to_track(track_index, track_name, sample_type, positions)
track_info["loaded"] = clips_loaded
log("\n" + "=" * 60)
log("SUMMARY")
log("=" * 60)
for track_info in audio_tracks_needing_samples:
log(f" Track {track_info['index']} ('{track_info['name']}'): {track_info.get('clips', 0)} -> +{track_info.get('loaded', 0)} clips loaded")
log("\nAgent 17 completed.")
if __name__ == "__main__":
main()

View File

@@ -1,104 +0,0 @@
#!/usr/bin/env python3
"""
Agent 7 - VOCAL/CHOIR SPECIALIST
Loads vocal samples at specific arrangement positions
"""
import socket
import json
import sys

HOST = "127.0.0.1"
PORT = 9877
VOCAL_MAIN_TRACK = 12
VOCAL_TEXTURE_TRACK = 13
VOCAL_MAIN_SAMPLES = [
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\BBH- Primer Impacto - Vocal Quema D#m 126 Bpm.wav",
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\vocal\BBH - Primer Impacto - Vocal Importante 1.wav",
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\vocal\BBH - Primer Impacto - Vocal Importante 2.wav",
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\vocal\BBH - Primer Impacto - Vocal Importante 3.wav",
]
VOCAL_TEXTURE_SAMPLES = [
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_03_Am_125.wav",
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_05_Cm_125.wav",
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_08_Cm_125.wav",
    r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_10_Bm_125.wav",
]
VOCAL_MAIN_POSITIONS = [16.0, 48.0, 80.0, 112.0]
VOCAL_TEXTURE_POSITIONS = [0.0, 32.0, 64.0, 96.0]
LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent7_vocals.txt"


def send_command(command_type: str, params: dict = None, timeout: float = 45.0) -> dict:
    payload = json.dumps({
        "type": command_type,
        "params": params or {},
    }).encode("utf-8") + b"\n"
    with socket.create_connection((HOST, PORT), timeout=timeout) as sock:
        sock.sendall(payload)
        reader = sock.makefile("r", encoding="utf-8")
        line = reader.readline()
    if not line:
        raise RuntimeError(f"No response for command: {command_type}")
    return json.loads(line)


def log(msg: str):
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(msg + "\n")
    print(msg)


def main():
    log("=" * 60)
    log("AGENT 7 - VOCAL/CHOIR SPECIALIST")
    log("=" * 60)
    # Step 1: Set input routing to "No Input" for both tracks
    log("\n[STEP 1] Setting input routing to 'No Input'...")
    for track_idx, track_name in [(VOCAL_MAIN_TRACK, "VOCAL MAIN"), (VOCAL_TEXTURE_TRACK, "VOCAL TEXTURE")]:
        try:
            result = send_command("set_track_input_routing", {"index": track_idx, "routing_name": "No Input"})
            log(f" Track {track_idx} ({track_name}): {result}")
        except Exception as e:
            log(f" ERROR Track {track_idx}: {e}")
    # Step 2: Load VOCAL MAIN samples at key moments
    log("\n[STEP 2] Loading VOCAL MAIN samples at key moments...")
    for i, (sample_path, position) in enumerate(zip(VOCAL_MAIN_SAMPLES, VOCAL_MAIN_POSITIONS)):
        try:
            result = send_command("create_arrangement_audio_pattern", {
                "track_index": VOCAL_MAIN_TRACK,
                "file_path": sample_path,
                "positions": [position],
                "name": f"Vocal Main {i+1}"
            })
            log(f" Position {position}: {sample_path.split(chr(92))[-1]} -> {result.get('status', 'unknown')}")
        except Exception as e:
            log(f" ERROR at position {position}: {e}")
    # Step 3: Load VOCAL TEXTURE samples at atmospheric positions
    log("\n[STEP 3] Loading VOCAL TEXTURE samples at atmospheric positions...")
    for i, (sample_path, position) in enumerate(zip(VOCAL_TEXTURE_SAMPLES, VOCAL_TEXTURE_POSITIONS)):
        try:
            result = send_command("create_arrangement_audio_pattern", {
                "track_index": VOCAL_TEXTURE_TRACK,
                "file_path": sample_path,
                "positions": [position],
                "name": f"Vocal Texture {i+1}"
            })
            log(f" Position {position}: {sample_path.split(chr(92))[-1]} -> {result.get('status', 'unknown')}")
        except Exception as e:
            log(f" ERROR at position {position}: {e}")
    log("\n" + "=" * 60)
    log("AGENT 7 COMPLETE - Vocal layers loaded")
    log("=" * 60)


if __name__ == "__main__":
    main()

View File

@@ -1,102 +0,0 @@
import json
import socket
from datetime import datetime

LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent8_fx.txt"


def log(msg):
    timestamp = datetime.now().isoformat()
    entry = f"[{timestamp}] {msg}"
    print(entry)
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(entry + "\n")


class AbletonSocketClient:
    def __init__(self, host="127.0.0.1", port=9877, timeout=30.0):
        self.host = host
        self.port = port
        self.timeout = timeout

    def send(self, command_type, params=None):
        payload = json.dumps({
            "type": command_type,
            "params": params or {},
        }).encode("utf-8") + b"\n"
        with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
            sock.sendall(payload)
            reader = sock.makefile("r", encoding="utf-8")
            try:
                line = reader.readline()
            finally:
                reader.close()
            try:
                sock.shutdown(socket.SHUT_RDWR)
            except OSError:
                pass
        if not line:
            raise RuntimeError(f"No response for command: {command_type}")
        return json.loads(line)


def main():
    log("=" * 60)
    log("AGENT 8 - FX TRANSITION SPECIALIST")
    log("=" * 60)
    client = AbletonSocketClient()
    RISER_TRACK = 16
    DOWNLIFTER_TRACK = 17
    CRASH_TRACK = 18
    RISER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\fx\BBH - Primer Impacto -Risers 2.wav"
    DOWNLIFTER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\fx\EFX_01_Em_125.wav"
    CRASH_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\fx\BBH - Primer Impacto - Crash 1.wav"
    RISER_POSITIONS = [14, 46, 78, 110, 142, 174]
    DOWNLIFTER_POSITIONS = [16, 48, 80, 112, 144, 176]
    CRASH_POSITIONS = [0, 32, 64, 96, 128, 160, 192]
    log(f"Track indices: RISER={RISER_TRACK}, DOWNLIFTER={DOWNLIFTER_TRACK}, CRASH={CRASH_TRACK}")
    log(f"Riser positions: {RISER_POSITIONS}")
    log(f"Downlifter positions: {DOWNLIFTER_POSITIONS}")
    log(f"Crash positions: {CRASH_POSITIONS}")
    log("")
    log("Step 1: Placing RISER samples...")
    result = client.send("create_arrangement_audio_pattern", {
        "track_index": RISER_TRACK,
        "file_path": RISER_PATH,
        "positions": RISER_POSITIONS,
        "name": "RISER FX"
    })
    log(f"RISER result: {json.dumps(result, indent=2)}")
    log("")
    log("Step 2: Placing DOWNLIFTER samples (using EFX fallback)...")
    result = client.send("create_arrangement_audio_pattern", {
        "track_index": DOWNLIFTER_TRACK,
        "file_path": DOWNLIFTER_PATH,
        "positions": DOWNLIFTER_POSITIONS,
        "name": "DOWNLIFTER FX"
    })
    log(f"DOWNLIFTER result: {json.dumps(result, indent=2)}")
    log("")
    log("Step 3: Placing CRASH samples...")
    result = client.send("create_arrangement_audio_pattern", {
        "track_index": CRASH_TRACK,
        "file_path": CRASH_PATH,
        "positions": CRASH_POSITIONS,
        "name": "CRASH FX"
    })
    log(f"CRASH result: {json.dumps(result, indent=2)}")
    log("")
    log("=" * 60)
    log("AGENT 8 COMPLETE")
    log("=" * 60)


if __name__ == "__main__":
    main()

View File

@@ -1,184 +0,0 @@
"""
Agent 9 - PERCUSSION SPECIALIST
Loads percussion samples into AUDIO PERC MAIN and AUDIO PERC FX tracks.
"""
import json
import socket
import os
from datetime import datetime
from typing import Any, Dict, List, Optional
LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent9_perc.txt"
HOST = "127.0.0.1"
PORT = 9877
TIMEOUT = 30.0
PERC_MAIN_TRACK_INDEX = 14
PERC_FX_TRACK_INDEX = 15
PERC_MAIN_POSITIONS = [0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176]
PERC_FX_POSITIONS = [4, 12, 20, 28, 36, 44, 52, 60]
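# Bar positions: one perc loop every 16 bars across the arrangement, plus FX
# one-shots every 8 bars (offset by 4) through the first 64 bars.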
SAMPLE_BASE = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples"
PERC_LOOP_SAMPLES = [
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_01_Fm_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_02_Any_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_03_A#_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_04_Any_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_05_Any_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_06_Dm_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_07_Cm_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_08_Fm_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_09_Bm_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_10_Dm_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_11_Am_125.wav"),
os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_12_Bm_125.wav"),
]
PERC_FX_SAMPLES = [
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 2.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 3.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 1.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 2.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 3.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 4.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 6.wav"),
os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 8.wav"),
]
def log(msg: str):
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
line = f"[{timestamp}] {msg}"
print(line)
try:
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
with open(LOG_FILE, "a", encoding="utf-8") as f:
f.write(line + "\n")
except Exception as e:
print(f"Log write error: {e}")
class AbletonSocketClient:
def __init__(self, host: str = HOST, port: int = PORT, timeout: float = TIMEOUT):
self.host = host
self.port = port
self.timeout = timeout
def send(self, command_type: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
payload = json.dumps({
"type": command_type,
"params": params or {},
}).encode("utf-8") + b"\n"
with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
sock.sendall(payload)
reader = sock.makefile("r", encoding="utf-8")
try:
line = reader.readline()
finally:
reader.close()
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
if not line:
raise RuntimeError(f"No response for command: {command_type}")
return json.loads(line)
def set_input_routing(client: AbletonSocketClient, track_index: int, routing_name: str) -> bool:
try:
response = client.send("set_track_input_routing", {
"track_index": track_index,
"routing_name": routing_name,
})
if response.get("status") == "success":
log(f"Set track {track_index} input routing to '{routing_name}'")
return True
else:
log(f"Failed to set input routing: {response.get('message', 'unknown error')}")
return False
except Exception as e:
log(f"Error setting input routing: {e}")
return False
def load_audio_pattern(client: AbletonSocketClient, track_index: int, file_path: str, positions: List[float], name: str = "") -> bool:
if not os.path.exists(file_path):
log(f"Sample not found: {file_path}")
return False
try:
response = client.send("create_arrangement_audio_pattern", {
"track_index": track_index,
"file_path": file_path,
"positions": positions,
"name": name or os.path.basename(file_path),
})
if response.get("status") == "success":
log(f"Loaded '{os.path.basename(file_path)}' at positions {positions[:3]}... on track {track_index}")
return True
else:
log(f"Failed to load audio: {response.get('message', 'unknown error')}")
return False
except Exception as e:
log(f"Error loading audio: {e}")
return False
def main():
log("=" * 60)
log("AGENT 9 - PERCUSSION SPECIALIST STARTING")
log("=" * 60)
client = AbletonSocketClient()
log("Connecting to Ableton socket...")
try:
info = client.send("get_session_info", {})
if info.get("status") != "success":
log("Failed to get session info")
return
log(f"Connected. BPM: {info.get('result', {}).get('tempo', 'unknown')}")
except Exception as e:
log(f"Connection failed: {e}")
return
log("Setting input routing to 'No Input'...")
set_input_routing(client, PERC_MAIN_TRACK_INDEX, "No Input")
set_input_routing(client, PERC_FX_TRACK_INDEX, "No Input")
log("")
log("Loading PERC MAIN loops...")
main_loaded = 0
for i, pos in enumerate(PERC_MAIN_POSITIONS):
if i < len(PERC_LOOP_SAMPLES):
sample = PERC_LOOP_SAMPLES[i]
if load_audio_pattern(client, PERC_MAIN_TRACK_INDEX, sample, [float(pos)], f"PERC_LOOP_{i+1}"):
main_loaded += 1
log(f"PERC MAIN: {main_loaded}/{len(PERC_MAIN_POSITIONS)} samples loaded")
log("")
log("Loading PERC FX hits...")
fx_loaded = 0
for i, pos in enumerate(PERC_FX_POSITIONS):
if i < len(PERC_FX_SAMPLES):
sample = PERC_FX_SAMPLES[i]
if load_audio_pattern(client, PERC_FX_TRACK_INDEX, sample, [float(pos)], f"PERC_FX_{i+1}"):
fx_loaded += 1
log(f"PERC FX: {fx_loaded}/{len(PERC_FX_POSITIONS)} samples loaded")
log("")
log("=" * 60)
log(f"AGENT 9 COMPLETE: MAIN={main_loaded}, FX={fx_loaded}")
log("=" * 60)
if __name__ == "__main__":
main()

View File

@@ -1,681 +0,0 @@
"""
audio_analyzer.py - Análisis de audio para detección de Key y BPM
Proporciona análisis básico de archivos de audio para extraer:
- BPM (tempo) mediante detección de onset y autocorrelación
- Key (tonalidad) mediante análisis de cromagrama
- Características espectrales para clasificación
"""
import os
import logging
import numpy as np
import subprocess
from pathlib import Path
from typing import Dict, Any, Optional, Tuple, List
from dataclasses import dataclass
from enum import Enum
logger = logging.getLogger("AudioAnalyzer")
# Musical constants
NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
KEY_PROFILES = {
# Krumhansl-Schmuckler profiles for key detection
'major': [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88],
'minor': [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]
}
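# Key detection (see _detect_key_librosa): the averaged chromagram is
# correlated against each profile rotated to all 12 roots; the best-scoring
# (root, mode) pair wins, and the correlation maps to a 0-1 confidence.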
CIRCLE_OF_FIFTHS_MAJOR = ['C', 'G', 'D', 'A', 'E', 'B', 'F#', 'C#', 'G#', 'D#', 'A#', 'F']
CIRCLE_OF_FIFTHS_MINOR = ['Am', 'Em', 'Bm', 'F#m', 'C#m', 'G#m', 'D#m', 'A#m', 'Fm', 'Cm', 'Gm', 'Dm']
class SampleType(Enum):
"""Tipos de samples musicales"""
KICK = "kick"
SNARE = "snare"
CLAP = "clap"
HAT_CLOSED = "hat_closed"
HAT_OPEN = "hat_open"
HAT = "hat"
PERC = "perc"
SHAKER = "shaker"
TOM = "tom"
CRASH = "crash"
RIDE = "ride"
BASS = "bass"
SYNTH = "synth"
PAD = "pad"
LEAD = "lead"
PLUCK = "pluck"
ARP = "arp"
CHORD = "chord"
STAB = "stab"
VOCAL = "vocal"
FX = "fx"
LOOP = "loop"
AMBIENCE = "ambience"
UNKNOWN = "unknown"
@dataclass
class AudioFeatures:
"""Características extraídas de un archivo de audio"""
bpm: Optional[float]
key: Optional[str]
key_confidence: float
duration: float
sample_rate: int
sample_type: SampleType
spectral_centroid: float
spectral_rolloff: float
zero_crossing_rate: float
rms_energy: float
is_harmonic: bool
is_percussive: bool
suggested_genres: List[str]
class AudioAnalyzer:
"""
Analizador de audio para samples musicales.
Soporta múltiples backends:
- librosa (recomendado, más preciso)
- basic (fallback sin dependencias externas, basado en nombre de archivo)
"""
def __init__(self, backend: str = "auto"):
"""
Inicializa el analizador de audio.
Args:
backend: 'librosa', 'basic', o 'auto' (detecta automáticamente)
"""
self.backend = backend
self._librosa_available = False
self._soundfile_available = False
if backend in ("auto", "librosa"):
self._check_librosa()
if self._librosa_available:
logger.info("Usando backend: librosa")
else:
logger.info("Usando backend: basic (análisis por nombre de archivo)")
def _check_librosa(self):
"""Verifica si librosa está disponible"""
try:
import librosa
import soundfile as sf
self._librosa_available = True
self._soundfile_available = True
self.librosa = librosa
self.sf = sf
except ImportError:
self._librosa_available = False
self._soundfile_available = False
def analyze(self, file_path: str) -> AudioFeatures:
"""
Analiza un archivo de audio y extrae características.
Args:
file_path: Ruta al archivo de audio
Returns:
AudioFeatures con los datos extraídos
"""
path = Path(file_path)
if not path.exists():
raise FileNotFoundError(f"Archivo no encontrado: {file_path}")
# Intentar análisis con librosa si está disponible
if self._librosa_available:
try:
return self._analyze_with_librosa(file_path)
except Exception as e:
logger.warning(f"Error con librosa: {e}, usando análisis básico")
# Fallback a análisis básico
return self._analyze_basic(file_path)
def _analyze_with_librosa(self, file_path: str) -> AudioFeatures:
"""Análisis completo usando librosa"""
# Cargar audio
y, sr = self.librosa.load(file_path, sr=None, mono=True)
# Duración
duration = self.librosa.get_duration(y=y, sr=sr)
# Detectar BPM
tempo, _ = self.librosa.beat.beat_track(y=y, sr=sr)
bpm = float(tempo) if isinstance(tempo, (int, float, np.number)) else None
# Análisis espectral
spectral_centroids = self.librosa.feature.spectral_centroid(y=y, sr=sr)[0]
spectral_rolloffs = self.librosa.feature.spectral_rolloff(y=y, sr=sr)[0]
zcr = self.librosa.feature.zero_crossing_rate(y)[0]
rms = self.librosa.feature.rms(y=y)[0]
# Detect key
key, key_confidence = self._detect_key_librosa(y, sr)
# Percussive vs. harmonic classification
is_percussive = self._is_percussive(y, sr)
is_harmonic = not is_percussive and duration > 1.0
# Determine sample type
sample_type = self._classify_sample_type(
file_path, is_percussive, is_harmonic, duration,
float(np.mean(spectral_centroids)), float(np.mean(rms))
)
# Suggest genres
suggested_genres = self._suggest_genres(sample_type, bpm, key)
return AudioFeatures(
bpm=bpm,
key=key,
key_confidence=key_confidence,
duration=duration,
sample_rate=sr,
sample_type=sample_type,
spectral_centroid=float(np.mean(spectral_centroids)),
spectral_rolloff=float(np.mean(spectral_rolloffs)),
zero_crossing_rate=float(np.mean(zcr)),
rms_energy=float(np.mean(rms)),
is_harmonic=is_harmonic,
is_percussive=is_percussive,
suggested_genres=suggested_genres
)
def _detect_key_librosa(self, y: np.ndarray, sr: int) -> Tuple[Optional[str], float]:
"""
Detecta la tonalidad usando cromagrama y correlación con perfiles.
"""
try:
# Calcular cromagrama
chroma = self.librosa.feature.chroma_stft(y=y, sr=sr)
chroma_avg = np.mean(chroma, axis=1)
# Normalizar
chroma_avg = chroma_avg / (np.sum(chroma_avg) + 1e-10)
best_key = None
best_score = -np.inf
best_mode = None
# Probar todas las tonalidades mayores y menores
for mode, profile in KEY_PROFILES.items():
for i in range(12):
# Rotar el perfil
rotated_profile = np.roll(profile, i)
# Correlación
score = np.corrcoef(chroma_avg, rotated_profile)[0, 1]
if score > best_score:
best_score = score
best_mode = mode
best_key = NOTE_NAMES[i]
# Formatear resultado
if best_key:
if best_mode == 'minor':
best_key = best_key + 'm'
confidence = max(0.0, min(1.0, (best_score + 1) / 2))
return best_key, confidence
except Exception as e:
logger.warning(f"Error detectando key: {e}")
return None, 0.0
def _is_percussive(self, y: np.ndarray, sr: int) -> bool:
"""
Determina si un sonido es principalmente percusivo.
"""
try:
# Separar componentes armónicos y percusivos
y_harmonic, y_percussive = self.librosa.effects.hpss(y)
# Calcular energía relativa
energy_harmonic = np.sum(y_harmonic ** 2)
energy_percussive = np.sum(y_percussive ** 2)
total_energy = energy_harmonic + energy_percussive
if total_energy > 0:
percussive_ratio = energy_percussive / total_energy
return percussive_ratio > 0.6
except Exception as e:
logger.warning(f"Error en separación HPSS: {e}")
# Fallback: usar duración como heurística
duration = len(y) / sr
return duration < 0.5
def _analyze_basic(self, file_path: str) -> AudioFeatures:
"""
Análisis básico sin dependencias externas.
Usa metadatos del archivo y nombre para inferir características.
"""
path = Path(file_path)
name = path.stem
# Extraer del nombre
bpm = self._extract_bpm_from_name(name)
key = self._extract_key_from_name(name)
# Estimar duración del archivo
duration = self._estimate_duration(file_path)
# Clasificar por nombre
sample_type = self._classify_by_name(name)
# Determinar características por tipo
is_percussive = sample_type in [
SampleType.KICK, SampleType.SNARE, SampleType.CLAP,
SampleType.HAT, SampleType.HAT_CLOSED, SampleType.HAT_OPEN,
SampleType.PERC, SampleType.SHAKER, SampleType.TOM,
SampleType.CRASH, SampleType.RIDE
]
is_harmonic = sample_type in [
SampleType.BASS, SampleType.SYNTH, SampleType.PAD,
SampleType.LEAD, SampleType.PLUCK, SampleType.CHORD,
SampleType.VOCAL
]
# Valores por defecto basados en tipo
spectral_centroid = 5000.0 if is_percussive else 1000.0
rms_energy = 0.5
suggested_genres = self._suggest_genres(sample_type, bpm, key)
return AudioFeatures(
bpm=bpm,
key=key,
key_confidence=0.7 if key else 0.0,
duration=duration,
sample_rate=44100,
sample_type=sample_type,
spectral_centroid=spectral_centroid,
spectral_rolloff=spectral_centroid * 2,
zero_crossing_rate=0.1 if is_harmonic else 0.3,
rms_energy=rms_energy,
is_harmonic=is_harmonic,
is_percussive=is_percussive,
suggested_genres=suggested_genres
)
def _estimate_duration(self, file_path: str) -> float:
"""Estima la duración del archivo de audio"""
try:
import wave
ext = Path(file_path).suffix.lower()
if ext == '.wav':
with wave.open(file_path, 'rb') as wav:
frames = wav.getnframes()
rate = wav.getframerate()
return frames / float(rate)
elif ext in ('.mp3', '.ogg', '.flac', '.aif', '.aiff', '.m4a'):
windows_duration = self._estimate_duration_with_windows_shell(file_path)
if windows_duration > 0:
return windows_duration
# Estimate from file size
size = os.path.getsize(file_path)
# Approximation: CD-quality stereo is 44100 Hz * 2 ch * 2 bytes ~= 176,400 bytes/s
return size / 176400.0
except Exception as e:
logger.warning(f"Duration estimation error: {e}")
return 0.0
def _estimate_duration_with_windows_shell(self, file_path: str) -> float:
"""Obtiene la duración usando metadatos del shell de Windows cuando están disponibles."""
if os.name != 'nt':
return 0.0
safe_path = file_path.replace("'", "''")
powershell_command = (
f"$path = '{safe_path}'; "
"$shell = New-Object -ComObject Shell.Application; "
"$folder = $shell.Namespace((Split-Path $path)); "
"$file = $folder.ParseName((Split-Path $path -Leaf)); "
"$duration = $folder.GetDetailsOf($file, 27); "
"Write-Output $duration"
)
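# GetDetailsOf(..., 27) reads the "Length" column on most Windows builds; the
# index is locale/version dependent, so parse failures fall through to 0.0.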
try:
result = subprocess.run(
f'powershell -NoProfile -Command "{powershell_command}"',
capture_output=True,
text=True,
timeout=5,
check=False,
shell=True,
)
value = (result.stdout or "").strip()
if not value:
return 0.0
parts = value.split(':')
if len(parts) == 3:
return (int(parts[0]) * 3600) + (int(parts[1]) * 60) + float(parts[2])
return 0.0
except Exception:
return 0.0
def _extract_bpm_from_name(self, name: str) -> Optional[float]:
"""Extrae BPM del nombre del archivo"""
import re
patterns = [
r'[_\s\-](\d{2,3})\s*BPM',
r'[_\s\-](\d{2,3})[_\s\-]',
r'(\d{2,3})bpm',
# `name` is path.stem (no extension), so also match a trailing "..._125"
r'[_\s\-](\d{2,3})$',
]
for pattern in patterns:
match = re.search(pattern, name, re.IGNORECASE)
if match:
bpm = int(match.group(1))
if 60 <= bpm <= 200:
return float(bpm)
return None
def _extract_key_from_name(self, name: str) -> Optional[str]:
"""Extrae key del nombre del archivo"""
import re
patterns = [
r'[_\s\-]([A-G][#b]?(?:m|min|minor)?)[_\s\-]',
r'\bin\s+([A-G][#b]?(?:m|min|minor)?)\b',
r'Key\s+([A-G][#b]?(?:m|min|minor)?)',
r'[_\s\-]([A-G][#b]?)\s*(?:maj|major)?[_\s\-]',
]
for pattern in patterns:
match = re.search(pattern, name, re.IGNORECASE)
if match:
key = match.group(1)
# Normalize flats to their enharmonic sharps (Db -> C#, Eb -> D#, ...)
for flat, sharp in (('Db', 'C#'), ('Eb', 'D#'), ('Gb', 'F#'), ('Ab', 'G#'), ('Bb', 'A#')):
key = key.replace(flat, sharp)
# Detect minor mode
is_minor = 'm' in key.lower() or 'min' in key.lower()
key = key.replace('min', '').replace('minor', '').replace('major', '')
key = key.rstrip('mM')
if is_minor:
key = key + 'm'
return key
return None
def _classify_sample_type(self, file_path: str, is_percussive: bool,
is_harmonic: bool, duration: float,
spectral_centroid: float, rms: float) -> SampleType:
"""Clasifica el tipo de sample basado en características"""
# Primero intentar por nombre
sample_type = self._classify_by_name(Path(file_path).stem)
if sample_type != SampleType.UNKNOWN:
return sample_type
# Classify from audio features
if is_percussive:
if duration < 0.1:
if spectral_centroid < 2000:
return SampleType.KICK
elif spectral_centroid > 8000:
return SampleType.HAT_CLOSED
else:
return SampleType.SNARE
elif duration < 0.3:
return SampleType.CLAP
else:
return SampleType.PERC
elif is_harmonic:
if spectral_centroid < 500:
return SampleType.BASS
elif duration > 4.0:
return SampleType.PAD
else:
return SampleType.SYNTH
return SampleType.UNKNOWN
def _classify_by_name(self, name: str) -> SampleType:
"""Clasifica el tipo de sample basado en su nombre"""
name_lower = name.lower()
# Mapeo de palabras clave a tipos
keywords = {
SampleType.KICK: ['kick', 'bd', 'bass drum', 'kickdrum', 'kik'],
SampleType.SNARE: ['snare', 'snr', 'sd', 'rim'],
SampleType.CLAP: ['clap', 'clp', 'handclap'],
SampleType.HAT_CLOSED: ['closed hat', 'closedhat', 'chh', 'closed'],
SampleType.HAT_OPEN: ['open hat', 'openhat', 'ohh', 'open'],
SampleType.HAT: ['hat', 'hihat', 'hi-hat', 'hh'],
SampleType.PERC: ['perc', 'percussion', 'conga', 'bongo', 'timb'],
SampleType.SHAKER: ['shaker', 'shake', 'tamb'],
SampleType.TOM: ['tom', 'tomtom'],
SampleType.CRASH: ['crash', 'cymbal'],
SampleType.RIDE: ['ride'],
SampleType.BASS: ['bass', 'bassline', 'sub', '808', 'reese'],
SampleType.SYNTH: ['synth', 'lead', 'arp', 'sequence'],
SampleType.PAD: ['pad', 'atmosphere', 'dron'],
SampleType.PLUCK: ['pluck'],
SampleType.CHORD: ['chord', 'stab'],
SampleType.VOCAL: ['vocal', 'vox', 'voice', 'speech', 'talk'],
SampleType.FX: ['fx', 'effect', 'sweep', 'riser', 'downlifter', 'impact', 'hit', 'noise'],
SampleType.LOOP: ['loop', 'full', 'groove'],
}
for sample_type, words in keywords.items():
for word in words:
if word in name_lower:
return sample_type
return SampleType.UNKNOWN
def _suggest_genres(self, sample_type: SampleType, bpm: Optional[float],
key: Optional[str]) -> List[str]:
"""Sugiere géneros musicales apropiados para el sample"""
genres = []
if bpm:
if 118 <= bpm <= 128:
genres.extend(['house', 'tech-house', 'deep-house'])
elif 124 <= bpm <= 132:
genres.extend(['tech-house', 'techno'])
elif 132 <= bpm <= 142:
genres.extend(['techno', 'peak-time-techno'])
elif 142 <= bpm <= 150:
genres.extend(['trance', 'hard-techno'])
elif 160 <= bpm <= 180:
genres.extend(['drum-and-bass', 'neurofunk'])
elif bpm < 118:
genres.extend(['downtempo', 'ambient', 'lo-fi'])
# By sample type
if sample_type in [SampleType.KICK, SampleType.SNARE, SampleType.CLAP]:
if not genres:
genres = ['techno', 'house']
elif sample_type == SampleType.BASS:
if not genres:
genres = ['techno', 'house', 'bass-music']
elif sample_type in [SampleType.SYNTH, SampleType.PAD]:
if not genres:
genres = ['trance', 'progressive', 'ambient']
return genres if genres else ['electronic']
def get_compatible_key(self, key: str, shift: int = 0) -> str:
"""
Obtiene una key compatible usando el círculo de quintas.
Args:
key: Key original (ej: 'Am', 'F#m')
shift: Desplazamiento en el círculo (+1 = quinta arriba, -1 = quinta abajo)
Returns:
Key resultante
"""
is_minor = key.endswith('m')
root = key.rstrip('m')
if root not in NOTE_NAMES:
return key
circle = CIRCLE_OF_FIFTHS_MINOR if is_minor else CIRCLE_OF_FIFTHS_MAJOR
try:
idx = circle.index(key)
new_idx = (idx + shift) % 12
return circle[new_idx]
except ValueError:
return key
def calculate_key_compatibility(self, key1: str, key2: str) -> float:
"""
Calcula la compatibilidad entre dos keys (0-1).
Usa el círculo de quintas: keys cercanas son más compatibles.
"""
if key1 == key2:
return 1.0
# Normalize
def normalize(k):
is_minor = k.endswith('m')
root = k.rstrip('m')
# Convert flats to sharps
root = root.replace('Db', 'C#').replace('Eb', 'D#')
root = root.replace('Gb', 'F#').replace('Ab', 'G#').replace('Bb', 'A#')
return root + ('m' if is_minor else '')
k1 = normalize(key1)
k2 = normalize(key2)
if k1 == k2:
return 1.0
# Check for different modes of the same root
if k1.rstrip('m') == k2.rstrip('m'):
return 0.8  # Same root, different mode
# Use the circle of fifths
is_minor1 = k1.endswith('m')
is_minor2 = k2.endswith('m')
if is_minor1 != is_minor2:
return 0.3  # Different mode, low compatibility
circle = CIRCLE_OF_FIFTHS_MINOR if is_minor1 else CIRCLE_OF_FIFTHS_MAJOR
try:
idx1 = circle.index(k1)
idx2 = circle.index(k2)
distance = min(abs(idx1 - idx2), 12 - abs(idx1 - idx2))
# Compatibility decreases with distance
compatibility = max(0.0, 1.0 - (distance * 0.2))
return compatibility
except ValueError:
return 0.0
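# Worked examples: Am vs Em are neighbors on the minor circle (distance 1)
# -> 1.0 - 0.2 = 0.8; Am vs Cm are 3 steps apart -> 0.4; Am vs C differ in
# mode -> 0.3.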
# Global instance
_analyzer: Optional[AudioAnalyzer] = None
def get_analyzer() -> AudioAnalyzer:
"""Obtiene la instancia global del analizador"""
global _analyzer
if _analyzer is None:
_analyzer = AudioAnalyzer()
return _analyzer
def analyze_sample(file_path: str) -> Dict[str, Any]:
"""
Función de conveniencia para analizar un sample.
Returns:
Diccionario con las características del sample
"""
analyzer = get_analyzer()
features = analyzer.analyze(file_path)
return {
'bpm': features.bpm,
'key': features.key,
'key_confidence': features.key_confidence,
'duration': features.duration,
'sample_rate': features.sample_rate,
'sample_type': features.sample_type.value,
'spectral_centroid': features.spectral_centroid,
'rms_energy': features.rms_energy,
'is_harmonic': features.is_harmonic,
'is_percussive': features.is_percussive,
'suggested_genres': features.suggested_genres,
}
def quick_analyze(file_path: str) -> Dict[str, Any]:
"""
Análisis rápido basado solo en el nombre del archivo.
No requiere dependencias externas.
"""
analyzer = AudioAnalyzer(backend="basic")
features = analyzer.analyze(file_path)
return {
'bpm': features.bpm,
'key': features.key,
'sample_type': features.sample_type.value,
'suggested_genres': features.suggested_genres,
}
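# Sketch of the expected output given the name patterns above (not a recorded run):
# quick_analyze("Perc_Loop_03_A#_125.wav") ->
# {'bpm': 125.0, 'key': 'A#', 'sample_type': 'perc',
#  'suggested_genres': ['house', 'tech-house', 'deep-house']}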
# Testing
if __name__ == "__main__":
import sys
logging.basicConfig(level=logging.INFO)
if len(sys.argv) < 2:
print("Uso: python audio_analyzer.py <archivo_de_audio>")
sys.exit(1)
file_path = sys.argv[1]
print(f"\nAnalizando: {file_path}")
print("=" * 50)
try:
result = analyze_sample(file_path)
print("\nResultados:")
print(f" BPM: {result['bpm'] or 'No detectado'}")
print(f" Key: {result['key'] or 'No detectado'} (confianza: {result['key_confidence']:.2f})")
print(f" Duración: {result['duration']:.2f}s")
print(f" Tipo: {result['sample_type']}")
print(f" Géneros sugeridos: {', '.join(result['suggested_genres'])}")
print(f" Es percusivo: {result['is_percussive']}")
print(f" Es armónico: {result['is_harmonic']}")
except Exception as e:
print(f"Error: {e}")
sys.exit(1)

File diff suppressed because it is too large

View File

@@ -1,431 +0,0 @@
"""
Enhanced Device Automation for Timbral Movement Between Sections.
This module provides expanded device automation parameters for musical variation.
"""
# =============================================================================
# ENHANCED SECTION DEVICE AUTOMATION - More timbral color per section
# =============================================================================
# Device automation on individual tracks, by role - ENHANCED
SECTION_DEVICE_AUTOMATION = {
# BASS - Filters, drive, and dynamic compression
'bass': {
'Saturator': {
'Drive': {'intro': 1.5, 'build': 3.5, 'drop': 5.0, 'break': 2.0, 'outro': 1.8},
'Dry/Wet': {'intro': 0.12, 'build': 0.22, 'drop': 0.30, 'break': 0.15, 'outro': 0.10},
},
'Auto Filter': {
'Frequency': {'intro': 6200.0, 'build': 8500.0, 'drop': 12000.0, 'break': 4800.0, 'outro': 5800.0},
'Dry/Wet': {'intro': 0.08, 'build': 0.18, 'drop': 0.12, 'break': 0.22, 'outro': 0.06},
'Resonance': {'intro': 0.25, 'build': 0.35, 'drop': 0.20, 'break': 0.40, 'outro': 0.28},
},
'Compressor': {
'Threshold': {'intro': -12.0, 'build': -14.0, 'drop': -18.0, 'break': -10.0, 'outro': -11.0},
'Ratio': {'intro': 2.5, 'build': 3.0, 'drop': 4.0, 'break': 2.0, 'outro': 2.2},
},
'Utility': {
'Stereo Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0},
},
},
'sub_bass': {
'Saturator': {
'Drive': {'intro': 1.0, 'build': 2.5, 'drop': 4.0, 'break': 1.5, 'outro': 1.2},
},
'Auto Filter': {
'Frequency': {'intro': 5200.0, 'build': 7200.0, 'drop': 10000.0, 'break': 4200.0, 'outro': 4800.0},
'Dry/Wet': {'intro': 0.05, 'build': 0.10, 'drop': 0.06, 'break': 0.14, 'outro': 0.04},
},
'Utility': {
'Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0},
'Gain': {'intro': 0.0, 'build': 0.2, 'drop': 0.4, 'break': -0.2, 'outro': 0.0},
},
},
# PAD - Enveloping filters with width and reverb
'pad': {
'Auto Filter': {
'Frequency': {'intro': 4500.0, 'build': 8000.0, 'drop': 11000.0, 'break': 3200.0, 'outro': 4000.0},
'Dry/Wet': {'intro': 0.25, 'build': 0.18, 'drop': 0.12, 'break': 0.35, 'outro': 0.28},
'Resonance': {'intro': 0.20, 'build': 0.28, 'drop': 0.15, 'break': 0.35, 'outro': 0.22},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.22, 'build': 0.16, 'drop': 0.10, 'break': 0.28, 'outro': 0.24},
'Decay Time': {'intro': 3.5, 'build': 2.8, 'drop': 2.0, 'break': 4.2, 'outro': 3.8},
},
'Utility': {
'Stereo Width': {'intro': 0.85, 'build': 1.02, 'drop': 1.12, 'break': 1.25, 'outro': 0.90},
},
'Saturator': {
'Drive': {'intro': 0.8, 'build': 1.5, 'drop': 2.5, 'break': 0.6, 'outro': 0.7},
'Dry/Wet': {'intro': 0.10, 'build': 0.15, 'drop': 0.20, 'break': 0.08, 'outro': 0.12},
},
},
# ATMOS - Spatial filters with movement
'atmos': {
'Auto Filter': {
'Frequency': {'intro': 3800.0, 'build': 7200.0, 'drop': 9800.0, 'break': 2800.0, 'outro': 3500.0},
'Dry/Wet': {'intro': 0.30, 'build': 0.22, 'drop': 0.15, 'break': 0.40, 'outro': 0.32},
'Resonance': {'intro': 0.22, 'build': 0.32, 'drop': 0.18, 'break': 0.42, 'outro': 0.25},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.35, 'build': 0.28, 'drop': 0.18, 'break': 0.42, 'outro': 0.38},
'Decay Time': {'intro': 4.0, 'build': 3.2, 'drop': 2.2, 'break': 5.0, 'outro': 4.5},
},
'Utility': {
'Stereo Width': {'intro': 0.70, 'build': 0.88, 'drop': 1.05, 'break': 1.20, 'outro': 0.75},
},
},
# FX ELEMENTS
'reverse_fx': {
'Auto Filter': {
'Frequency': {'intro': 5200.0, 'build': 9000.0, 'drop': 12000.0, 'break': 6000.0, 'outro': 4800.0},
'Dry/Wet': {'intro': 0.20, 'build': 0.28, 'drop': 0.15, 'break': 0.35, 'outro': 0.22},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.30, 'build': 0.35, 'drop': 0.20, 'break': 0.40, 'outro': 0.28},
'Decay Time': {'intro': 3.0, 'build': 4.5, 'drop': 2.5, 'break': 5.5, 'outro': 3.5},
},
'Saturator': {
'Drive': {'intro': 1.2, 'build': 2.8, 'drop': 4.5, 'break': 1.8, 'outro': 1.0},
},
},
'riser': {
'Auto Filter': {
'Frequency': {'intro': 4000.0, 'build': 10000.0, 'drop': 14000.0, 'break': 5500.0, 'outro': 4200.0},
'Dry/Wet': {'intro': 0.15, 'build': 0.30, 'drop': 0.12, 'break': 0.22, 'outro': 0.18},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.25, 'build': 0.40, 'drop': 0.22, 'break': 0.35, 'outro': 0.20},
'Decay Time': {'intro': 2.5, 'build': 5.0, 'drop': 3.0, 'break': 4.0, 'outro': 2.8},
},
'Echo': {
'Dry/Wet': {'intro': 0.18, 'build': 0.35, 'drop': 0.15, 'break': 0.25, 'outro': 0.15},
'Feedback': {'intro': 0.30, 'build': 0.55, 'drop': 0.25, 'break': 0.45, 'outro': 0.28},
},
'Saturator': {
'Drive': {'intro': 1.5, 'build': 4.0, 'drop': 3.0, 'break': 2.5, 'outro': 1.2},
},
},
'impact': {
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.15, 'build': 0.18, 'drop': 0.12, 'break': 0.20, 'outro': 0.14},
'Decay Time': {'intro': 2.0, 'build': 2.5, 'drop': 1.8, 'break': 3.0, 'outro': 2.2},
},
'Saturator': {
'Drive': {'intro': 1.8, 'build': 2.5, 'drop': 3.5, 'break': 2.0, 'outro': 1.5},
},
},
'drone': {
'Auto Filter': {
'Frequency': {'intro': 3000.0, 'build': 6500.0, 'drop': 9000.0, 'break': 2500.0, 'outro': 2800.0},
'Dry/Wet': {'intro': 0.20, 'build': 0.15, 'drop': 0.10, 'break': 0.30, 'outro': 0.22},
'Resonance': {'intro': 0.25, 'build': 0.35, 'drop': 0.22, 'break': 0.40, 'outro': 0.28},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.18, 'build': 0.14, 'drop': 0.08, 'break': 0.25, 'outro': 0.20},
'Decay Time': {'intro': 4.5, 'build': 3.5, 'drop': 2.5, 'break': 5.5, 'outro': 4.8},
},
'Saturator': {
'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.8, 'break': 0.6, 'outro': 0.7},
},
},
# HATS - Brightness filters with resonance and saturation
'hat_closed': {
'Auto Filter': {
'Frequency': {'intro': 12000.0, 'build': 14000.0, 'drop': 16000.0, 'break': 10000.0, 'outro': 11000.0},
'Dry/Wet': {'intro': 0.12, 'build': 0.16, 'drop': 0.10, 'break': 0.20, 'outro': 0.14},
'Resonance': {'intro': 0.15, 'build': 0.25, 'drop': 0.12, 'outro': 0.18, 'break': 0.30},
},
'Saturator': {
'Drive': {'intro': 0.5, 'build': 1.2, 'drop': 1.8, 'break': 0.8, 'outro': 0.6},
},
},
'hat_open': {
'Auto Filter': {
'Frequency': {'intro': 9000.0, 'build': 11000.0, 'drop': 13000.0, 'break': 7500.0, 'outro': 8500.0},
'Dry/Wet': {'intro': 0.18, 'build': 0.22, 'drop': 0.14, 'break': 0.28, 'outro': 0.20},
'Resonance': {'intro': 0.18, 'build': 0.28, 'drop': 0.15, 'outro': 0.20, 'break': 0.35},
},
'Echo': {
'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.22, 'outro': 0.12},
},
},
'top_loop': {
'Auto Filter': {
'Frequency': {'intro': 8500.0, 'build': 10500.0, 'drop': 12500.0, 'break': 7000.0, 'outro': 8000.0},
'Dry/Wet': {'intro': 0.20, 'build': 0.25, 'drop': 0.16, 'break': 0.32, 'outro': 0.22},
'Resonance': {'intro': 0.12, 'build': 0.22, 'drop': 0.14, 'outro': 0.15, 'break': 0.28},
},
'Echo': {
'Dry/Wet': {'intro': 0.05, 'build': 0.12, 'drop': 0.08, 'break': 0.18, 'outro': 0.10},
},
},
# SYNTHS
'chords': {
'Auto Filter': {
'Frequency': {'intro': 5500.0, 'build': 8500.0, 'drop': 11000.0, 'break': 4000.0, 'outro': 5000.0},
'Dry/Wet': {'intro': 0.15, 'build': 0.20, 'drop': 0.12, 'break': 0.28, 'outro': 0.18},
'Resonance': {'intro': 0.18, 'build': 0.28, 'drop': 0.15, 'outro': 0.20, 'break': 0.35},
},
'Echo': {
'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.08, 'break': 0.22, 'outro': 0.12},
'Feedback': {'intro': 0.25, 'build': 0.40, 'drop': 0.30, 'break': 0.45, 'outro': 0.28},
},
'Saturator': {
'Drive': {'intro': 1.2, 'build': 2.2, 'drop': 3.5, 'break': 1.5, 'outro': 1.0},
},
'Utility': {
'Stereo Width': {'intro': 0.95, 'build': 1.05, 'drop': 1.15, 'break': 1.25, 'outro': 1.00},
},
},
'lead': {
'Saturator': {
'Drive': {'intro': 1.0, 'build': 2.5, 'drop': 4.0, 'break': 1.5, 'outro': 1.2},
'Dry/Wet': {'intro': 0.12, 'build': 0.20, 'drop': 0.25, 'break': 0.10, 'outro': 0.15},
},
'Echo': {
'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.18, 'outro': 0.10},
'Feedback': {'intro': 0.20, 'build': 0.35, 'drop': 0.28, 'break': 0.40, 'outro': 0.22},
},
'Auto Filter': {
'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12500.0, 'break': 4500.0, 'outro': 5500.0},
'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.20, 'outro': 0.12},
},
'Utility': {
'Stereo Width': {'intro': 0.90, 'build': 1.02, 'drop': 1.10, 'break': 1.18, 'outro': 0.95},
},
},
'stab': {
'Saturator': {
'Drive': {'intro': 2.0, 'build': 3.5, 'drop': 5.0, 'break': 2.5, 'outro': 2.2},
'Dry/Wet': {'intro': 0.18, 'build': 0.25, 'drop': 0.30, 'break': 0.15, 'outro': 0.20},
},
'Auto Filter': {
'Frequency': {'intro': 6000.0, 'build': 9000.0, 'drop': 12000.0, 'break': 5000.0, 'outro': 5500.0},
'Dry/Wet': {'intro': 0.10, 'build': 0.15, 'drop': 0.08, 'break': 0.22, 'outro': 0.12},
},
'Utility': {
'Stereo Width': {'intro': 0.88, 'build': 1.00, 'drop': 1.12, 'break': 1.20, 'outro': 0.92},
},
},
'pluck': {
'Echo': {
'Dry/Wet': {'intro': 0.12, 'build': 0.22, 'drop': 0.14, 'break': 0.28, 'outro': 0.15},
'Feedback': {'intro': 0.30, 'build': 0.45, 'drop': 0.35, 'break': 0.50, 'outro': 0.32},
},
'Auto Filter': {
'Frequency': {'intro': 7000.0, 'build': 10000.0, 'drop': 13000.0, 'break': 5500.0, 'outro': 6500.0},
'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.20, 'outro': 0.12},
},
'Saturator': {
'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.8, 'break': 1.2, 'outro': 0.9},
},
},
'arp': {
'Echo': {
'Dry/Wet': {'intro': 0.15, 'build': 0.28, 'drop': 0.18, 'break': 0.35, 'outro': 0.18},
'Feedback': {'intro': 0.35, 'build': 0.50, 'drop': 0.40, 'break': 0.58, 'outro': 0.38},
},
'Auto Filter': {
'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12500.0, 'break': 5000.0, 'outro': 6000.0},
'Dry/Wet': {'intro': 0.12, 'build': 0.18, 'drop': 0.14, 'break': 0.25, 'outro': 0.15},
},
'Saturator': {
'Drive': {'intro': 0.6, 'build': 1.5, 'drop': 2.5, 'break': 1.0, 'outro': 0.7},
},
},
'counter': {
'Echo': {
'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.12, 'break': 0.22, 'outro': 0.12},
},
'Auto Filter': {
'Frequency': {'intro': 6000.0, 'build': 8800.0, 'drop': 11500.0, 'break': 4800.0, 'outro': 5200.0},
'Dry/Wet': {'intro': 0.10, 'build': 0.16, 'drop': 0.12, 'break': 0.22, 'outro': 0.14},
},
'Utility': {
'Stereo Width': {'intro': 0.75, 'build': 0.92, 'drop': 1.08, 'break': 1.15, 'outro': 0.80},
},
},
# VOCAL
'vocal': {
'Echo': {
'Dry/Wet': {'intro': 0.12, 'build': 0.25, 'drop': 0.15, 'break': 0.30, 'outro': 0.14},
'Feedback': {'intro': 0.25, 'build': 0.42, 'drop': 0.30, 'break': 0.48, 'outro': 0.28},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.06, 'break': 0.18, 'outro': 0.10},
'Decay Time': {'intro': 2.5, 'build': 3.5, 'drop': 2.0, 'break': 4.0, 'outro': 2.8},
},
'Auto Filter': {
'Frequency': {'intro': 6000.0, 'build': 9000.0, 'drop': 11000.0, 'break': 5000.0, 'outro': 5500.0},
'Dry/Wet': {'intro': 0.10, 'build': 0.16, 'drop': 0.10, 'break': 0.20, 'outro': 0.12},
},
'Saturator': {
'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.5, 'break': 1.2, 'outro': 0.9},
},
},
# DRUMS - No device automation (handled via volume/sends)
'kick': {},
'clap': {},
'snare_fill': {},
'perc': {},
'ride': {},
'tom_fill': {},
'crash': {},
'sc_trigger': {},
}
# =============================================================================
# ENHANCED BUS DEVICE AUTOMATION - More drive/compression per section
# =============================================================================
BUS_DEVICE_AUTOMATION = {
'drums': {
'Compressor': {
'Threshold': {'intro': -14.0, 'build': -16.0, 'drop': -18.5, 'break': -12.0, 'outro': -13.5},
'Ratio': {'intro': 2.5, 'build': 3.0, 'drop': 4.0, 'break': 2.2, 'outro': 2.4},
'Attack': {'intro': 0.015, 'build': 0.010, 'drop': 0.005, 'break': 0.020, 'outro': 0.018},
},
'Saturator': {
'Drive': {'intro': 0.8, 'build': 1.5, 'drop': 2.5, 'break': 1.0, 'outro': 0.9},
'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.22, 'break': 0.10, 'outro': 0.10},
},
'Limiter': {
'Gain': {'intro': 0.2, 'build': 0.3, 'drop': 0.5, 'break': 0.15, 'outro': 0.18},
},
'Auto Filter': {
'Frequency': {'intro': 8500.0, 'build': 10000.0, 'drop': 14000.0, 'break': 6500.0, 'outro': 7500.0},
'Dry/Wet': {'intro': 0.12, 'build': 0.10, 'drop': 0.05, 'break': 0.18, 'outro': 0.14},
},
},
'bass': {
'Saturator': {
'Drive': {'intro': 1.0, 'build': 2.0, 'drop': 3.5, 'break': 1.5, 'outro': 1.2},
'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.25, 'break': 0.12, 'outro': 0.10},
},
'Compressor': {
'Threshold': {'intro': -15.0, 'build': -17.0, 'drop': -20.0, 'break': -14.0, 'outro': -14.5},
'Ratio': {'intro': 3.0, 'build': 3.5, 'drop': 4.5, 'break': 2.8, 'outro': 3.0},
'Attack': {'intro': 0.020, 'build': 0.015, 'drop': 0.008, 'break': 0.025, 'outro': 0.022},
},
'Utility': {
'Stereo Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0},
},
'Auto Filter': {
'Frequency': {'intro': 5000.0, 'build': 7000.0, 'drop': 10000.0, 'break': 4500.0, 'outro': 5200.0},
'Dry/Wet': {'intro': 0.05, 'build': 0.08, 'drop': 0.12, 'break': 0.10, 'outro': 0.06},
},
},
'music': {
'Compressor': {
'Threshold': {'intro': -19.0, 'build': -20.0, 'drop': -22.0, 'break': -18.0, 'outro': -18.5},
'Ratio': {'intro': 2.0, 'build': 2.5, 'drop': 3.0, 'break': 1.8, 'outro': 2.0},
'Attack': {'intro': 0.025, 'build': 0.020, 'drop': 0.015, 'break': 0.030, 'outro': 0.028},
},
'Auto Filter': {
'Frequency': {'intro': 8000.0, 'build': 11000.0, 'drop': 14000.0, 'break': 6000.0, 'outro': 7500.0},
'Dry/Wet': {'intro': 0.08, 'build': 0.05, 'drop': 0.03, 'break': 0.12, 'outro': 0.10},
},
'Utility': {
'Stereo Width': {'intro': 1.05, 'build': 1.10, 'drop': 1.12, 'break': 1.18, 'outro': 1.08},
},
'Saturator': {
'Drive': {'intro': 0.3, 'build': 0.8, 'drop': 1.5, 'break': 0.4, 'outro': 0.35},
'Dry/Wet': {'intro': 0.05, 'build': 0.10, 'drop': 0.15, 'break': 0.08, 'outro': 0.06},
},
},
'vocal': {
'Echo': {
'Dry/Wet': {'intro': 0.06, 'build': 0.10, 'drop': 0.05, 'break': 0.15, 'outro': 0.08},
'Feedback': {'intro': 0.25, 'build': 0.38, 'drop': 0.28, 'break': 0.45, 'outro': 0.30},
},
'Compressor': {
'Threshold': {'intro': -16.0, 'build': -17.0, 'drop': -19.0, 'break': -15.0, 'outro': -15.5},
'Ratio': {'intro': 2.8, 'build': 3.2, 'drop': 3.8, 'break': 2.5, 'outro': 2.7},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.04, 'build': 0.08, 'drop': 0.03, 'break': 0.12, 'outro': 0.06},
'Decay Time': {'intro': 2.0, 'build': 2.8, 'drop': 1.5, 'break': 3.5, 'outro': 2.5},
},
'Auto Filter': {
'Frequency': {'intro': 8500.0, 'build': 10500.0, 'drop': 13000.0, 'break': 7200.0, 'outro': 8000.0},
'Dry/Wet': {'intro': 0.06, 'build': 0.10, 'drop': 0.04, 'break': 0.14, 'outro': 0.08},
},
},
'fx': {
'Auto Filter': {
'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12000.0, 'break': 5500.0, 'outro': 6000.0},
'Dry/Wet': {'intro': 0.12, 'build': 0.10, 'drop': 0.06, 'break': 0.18, 'outro': 0.14},
'Resonance': {'intro': 0.15, 'build': 0.22, 'drop': 0.12, 'break': 0.28, 'outro': 0.18},
},
'Hybrid Reverb': {
'Dry/Wet': {'intro': 0.15, 'build': 0.18, 'drop': 0.10, 'break': 0.22, 'outro': 0.16},
'Decay Time': {'intro': 2.5, 'build': 3.2, 'drop': 2.0, 'break': 4.0, 'outro': 3.0},
},
'Limiter': {
'Gain': {'intro': -0.2, 'build': 0.0, 'drop': 0.2, 'break': -0.3, 'outro': -0.1},
},
'Saturator': {
'Drive': {'intro': 0.5, 'build': 1.2, 'drop': 2.0, 'break': 0.8, 'outro': 0.6},
'Dry/Wet': {'intro': 0.08, 'build': 0.12, 'drop': 0.18, 'break': 0.10, 'outro': 0.10},
},
},
}
# =============================================================================
# ENHANCED MASTER Device Automation - Section Energy Response
# =============================================================================
MASTER_DEVICE_AUTOMATION = {
'Utility': {
'Stereo Width': {'intro': 1.04, 'build': 1.08, 'drop': 1.10, 'break': 1.12, 'outro': 1.06},
'Gain': {'intro': 0.6, 'build': 0.8, 'drop': 1.0, 'break': 0.5, 'outro': 0.5},
},
'Saturator': {
'Drive': {'intro': 0.2, 'build': 0.35, 'drop': 0.5, 'break': 0.15, 'outro': 0.18},
'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.25, 'break': 0.08, 'outro': 0.12},
},
'Compressor': {
'Ratio': {'intro': 0.55, 'build': 0.62, 'drop': 0.70, 'break': 0.50, 'outro': 0.52},
'Threshold': {'intro': -10.0, 'build': -12.0, 'drop': -14.0, 'break': -8.0, 'outro': -9.0},
'Attack': {'intro': 0.020, 'build': 0.015, 'drop': 0.010, 'break': 0.025, 'outro': 0.022},
'Release': {'intro': 0.15, 'build': 0.12, 'drop': 0.08, 'break': 0.18, 'outro': 0.16},
},
'Limiter': {
'Gain': {'intro': 1.0, 'build': 1.2, 'drop': 1.4, 'break': 0.9, 'outro': 0.95},
'Ceiling': {'intro': -0.5, 'build': -0.8, 'drop': -1.0, 'break': -0.3, 'outro': -0.4},
},
'Auto Filter': {
'Frequency': {'intro': 8000.0, 'build': 11000.0, 'drop': 15000.0, 'break': 6000.0, 'outro': 7000.0},
'Dry/Wet': {'intro': 0.05, 'build': 0.03, 'drop': 0.02, 'break': 0.08, 'outro': 0.06},
},
'Echo': {
'Dry/Wet': {'intro': 0.02, 'build': 0.06, 'drop': 0.04, 'break': 0.08, 'outro': 0.04},
'Feedback': {'intro': 0.15, 'build': 0.28, 'drop': 0.20, 'break': 0.32, 'outro': 0.22},
},
}
# Safety clamps for device parameters to prevent extreme values
DEVICE_PARAMETER_SAFETY_CLAMPS = {
'Drive': {'min': 0.0, 'max': 6.0},
'Frequency': {'min': 20.0, 'max': 20000.0},
'Dry/Wet': {'min': 0.0, 'max': 1.0},
'Feedback': {'min': 0.0, 'max': 0.7},
'Stereo Width': {'min': 0.0, 'max': 1.3},
'Resonance': {'min': 0.0, 'max': 1.0},
'Ratio': {'min': 1.0, 'max': 20.0},
'Threshold': {'min': -60.0, 'max': 0.0},
'Attack': {'min': 0.0001, 'max': 0.5},
'Release': {'min': 0.001, 'max': 2.0},
'Gain': {'min': -1.0, 'max': 1.8},
'Decay Time': {'min': 0.1, 'max': 10.0},
}
MASTER_SAFETY_CLAMPS = {
'Stereo Width': {'min': 0.0, 'max': 1.25},
'Drive': {'min': 0.0, 'max': 1.5},
'Ratio': {'min': 0.45, 'max': 0.9},
'Gain': {'min': 0.0, 'max': 1.6},
'Attack': {'min': 0.0001, 'max': 0.1},
'Ceiling': {'min': -3.0, 'max': 0.0},
}
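# Minimal consumer sketch (an assumption about how these tables are used, not
# part of the original module): look up one parameter value for a section and
# clamp it with the safety table before it is sent to Live. Device/parameter
# keys are the display names used above.
def resolve_device_value(role_table, device, param, section,
                         clamps=DEVICE_PARAMETER_SAFETY_CLAMPS):
    value = role_table[device][param][section]
    limits = clamps.get(param)
    if limits:
        value = max(limits['min'], min(limits['max'], value))
    return value
# e.g. resolve_device_value(SECTION_DEVICE_AUTOMATION['bass'], 'Saturator', 'Drive', 'drop') -> 5.0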

View File

@@ -1,170 +0,0 @@
import json
import socket
from datetime import datetime
LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\fx_group.txt"
def log(msg):
timestamp = datetime.now().isoformat()
entry = f"[{timestamp}] {msg}"
print(entry)
with open(LOG_FILE, "a", encoding="utf-8") as f:
f.write(entry + "\n")
class AbletonSocketClient:
def __init__(self, host="127.0.0.1", port=9877, timeout=30.0):
self.host = host
self.port = port
self.timeout = timeout
def send(self, command_type, params=None):
payload = json.dumps({
"type": command_type,
"params": params or {},
}).encode("utf-8") + b"\n"
with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
sock.sendall(payload)
reader = sock.makefile("r", encoding="utf-8")
try:
line = reader.readline()
finally:
reader.close()
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
if not line:
raise RuntimeError(f"No response for command: {command_type}")
return json.loads(line)
def set_input_routing(client, track_index, routing_name):
result = client.send("set_track_input_routing", {
"index": track_index,
"routing_name": routing_name
})
return result
def main():
log("=" * 60)
log("FX GROUP - TRANSITION FX LOADER")
log("=" * 60)
client = AbletonSocketClient()
RISER_TRACK = 20
DOWNLIFTER_TRACK = 21
CRASH_TRACK = 22
IMPACT_TRACK = 23
NOISE_TRACK = 24
REVERSE_TRACK = 25
RISER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\textures\fx\BBH - Primer Impacto -Risers 1.wav"
DOWNLIFTER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks\BBH - Primer Impacto -Downfilters 1.wav"
CRASH_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\fx\BBH - Primer Impacto - Crash 2.wav"
IMPACT_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\fx\BBH - Primer Impacto -Impact 1.wav"
NOISE_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks\EFX_01_Em_125.wav"
REVERSE_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\textures\fx\BBH - Primer Impacto -Risers 4.wav"
RISER_POSITIONS = [14, 46, 78, 110, 142, 174]
DOWNLIFTER_POSITIONS = [16, 48, 80, 112, 144, 176]
CRASH_POSITIONS = [0, 32, 64, 96, 128, 160, 192]
IMPACT_POSITIONS = [16, 48, 80, 112, 144]
NOISE_POSITIONS = [14, 46, 78, 110, 142, 174]
REVERSE_POSITIONS = [14, 30, 62, 94, 126]
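# Same bar grid as the FX plan above: impacts land on the downlifter points,
# noise sweeps double the risers, and reverses lead their targets by 2 bars
# (14 -> 16, 30 -> 32, ..., 126 -> 128).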
log(f"Track indices:")
log(f" RISER={RISER_TRACK}, DOWNLIFTER={DOWNLIFTER_TRACK}, CRASH={CRASH_TRACK}")
log(f" IMPACT={IMPACT_TRACK}, NOISE={NOISE_TRACK}, REVERSE={REVERSE_TRACK}")
log("")
log("Step 1: Placing RISER samples...")
log(f" Positions: {RISER_POSITIONS}")
log(f" File: {RISER_PATH}")
result = client.send("create_arrangement_audio_pattern", {
"track_index": RISER_TRACK,
"file_path": RISER_PATH,
"positions": RISER_POSITIONS,
"name": "RISER FX"
})
log(f" Result: {json.dumps(result, indent=2)}")
log("")
log("Step 2: Placing DOWNLIFTER samples...")
log(f" Positions: {DOWNLIFTER_POSITIONS}")
log(f" File: {DOWNLIFTER_PATH}")
result = client.send("create_arrangement_audio_pattern", {
"track_index": DOWNLIFTER_TRACK,
"file_path": DOWNLIFTER_PATH,
"positions": DOWNLIFTER_POSITIONS,
"name": "DOWNLIFTER FX"
})
log(f" Result: {json.dumps(result, indent=2)}")
log("")
log("Step 3: Placing CRASH samples...")
log(f" Positions: {CRASH_POSITIONS}")
log(f" File: {CRASH_PATH}")
result = client.send("create_arrangement_audio_pattern", {
"track_index": CRASH_TRACK,
"file_path": CRASH_PATH,
"positions": CRASH_POSITIONS,
"name": "CRASH FX"
})
log(f" Result: {json.dumps(result, indent=2)}")
log("")
log("Step 4: Placing IMPACT samples...")
log(f" Positions: {IMPACT_POSITIONS}")
log(f" File: {IMPACT_PATH}")
result = client.send("create_arrangement_audio_pattern", {
"track_index": IMPACT_TRACK,
"file_path": IMPACT_PATH,
"positions": IMPACT_POSITIONS,
"name": "IMPACT FX"
})
log(f" Result: {json.dumps(result, indent=2)}")
log("")
log("Step 5: Placing NOISE SWEEP samples...")
log(f" Positions: {NOISE_POSITIONS}")
log(f" File: {NOISE_PATH}")
result = client.send("create_arrangement_audio_pattern", {
"track_index": NOISE_TRACK,
"file_path": NOISE_PATH,
"positions": NOISE_POSITIONS,
"name": "NOISE FX"
})
log(f" Result: {json.dumps(result, indent=2)}")
log("")
log("Step 6: Placing REVERSE FX samples...")
log(f" Positions: {REVERSE_POSITIONS}")
log(f" File: {REVERSE_PATH}")
result = client.send("create_arrangement_audio_pattern", {
"track_index": REVERSE_TRACK,
"file_path": REVERSE_PATH,
"positions": REVERSE_POSITIONS,
"name": "REVERSE FX"
})
log(f" Result: {json.dumps(result, indent=2)}")
log("")
log("=" * 60)
log("Setting input routing to 'No Input' for all FX tracks...")
log("=" * 60)
for track_idx, track_name in [(RISER_TRACK, "RISER"), (DOWNLIFTER_TRACK, "DOWNLIFTER"),
(CRASH_TRACK, "CRASH"), (IMPACT_TRACK, "IMPACT"),
(NOISE_TRACK, "NOISE SWEEP"), (REVERSE_TRACK, "REVERSE FX")]:
result = set_input_routing(client, track_idx, "No Input")
log(f" {track_name} (track {track_idx}): {result}")
log("")
log("=" * 60)
log("FX GROUP COMPLETE")
log("=" * 60)
if __name__ == "__main__":
main()

View File

@@ -1,264 +0,0 @@
"""
reference_stem_builder.py - Rebuild an Ableton arrangement directly from a reference track.
"""
from __future__ import annotations
import json
import logging
import socket
from pathlib import Path
from typing import Any, Dict, List, Tuple
import soundfile as sf
import torch
from demucs.apply import apply_model
from demucs.pretrained import get_model
try:
import librosa
except ImportError: # pragma: no cover
librosa = None
try:
from reference_listener import ReferenceAudioListener
except ImportError: # pragma: no cover
from .reference_listener import ReferenceAudioListener
logger = logging.getLogger("ReferenceStemBuilder")
HOST = "127.0.0.1"
PORT = 9877
MESSAGE_TERMINATOR = b"\n"
SCRIPT_DIR = Path(__file__).resolve().parent
PACKAGE_DIR = SCRIPT_DIR.parent
PROJECT_SAMPLES_DIR = PACKAGE_DIR.parent / "librerias" / "organized_samples"
SAMPLES_DIR = str(PROJECT_SAMPLES_DIR)
TRACK_LAYOUT = (
("REFERENCE FULL", 59, 0.72, True),
("REF DRUMS", 10, 0.84, False),
("REF BASS", 30, 0.82, False),
("REF OTHER", 50, 0.68, False),
("REF VOCALS", 40, 0.70, False),
)
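# Each entry is (track name, Live color index, mixer volume, start muted);
# the full reference starts muted so only the separated stems play back.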
SECTION_BLUEPRINTS = {
"club": [
("INTRO DJ", 16),
("GROOVE A", 16),
("VOCAL BUILD", 8),
("DROP A", 16),
("BREAKDOWN", 8),
("BUILD B", 8),
("DROP B", 16),
("PEAK", 8),
("OUTRO DJ", 16),
],
"standard": [
("INTRO", 8),
("BUILD", 8),
("DROP A", 16),
("BREAK", 8),
("DROP B", 16),
("OUTRO", 8),
],
}
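# Each blueprint lists (section name, length in bars); "club" is chosen for
# references of 180 s or longer (see _resolve_reference_profile below).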
class AbletonSocketClient:
def __init__(self, host: str = HOST, port: int = PORT):
self.host = host
self.port = port
def send(self, command_type: str, params: Dict[str, Any] | None = None, timeout: float = 30.0) -> Dict[str, Any]:
payload = json.dumps({"type": command_type, "params": params or {}}, separators=(",", ":")).encode("utf-8") + MESSAGE_TERMINATOR
with socket.create_connection((self.host, self.port), timeout=timeout) as sock:
sock.sendall(payload)
data = b""
while not data.endswith(MESSAGE_TERMINATOR):
chunk = sock.recv(65536)
if not chunk:
break
data += chunk
if not data:
raise RuntimeError(f"Sin respuesta para {command_type}")
return json.loads(data.decode("utf-8", errors="replace").strip())
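# Protocol note: each request is one newline-terminated JSON object; the reply
# is read until the same terminator and may span several recv() calls.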
def _resolve_reference_profile(reference_path: Path) -> Dict[str, Any]:
listener = ReferenceAudioListener(SAMPLES_DIR)
analysis = listener.analyze_reference(str(reference_path))
structure = "club" if analysis.get("duration", 0.0) >= 180 else "standard"
return {
"tempo": float(analysis.get("tempo", 128.0) or 128.0),
"key": str(analysis.get("key", "") or ""),
"duration": float(analysis.get("duration", 0.0) or 0.0),
"structure": structure,
"listener_device": analysis.get("device", "cpu"),
}
def ensure_reference_wav(reference_path: Path) -> Path:
if reference_path.suffix.lower() == ".wav":
return reference_path
if librosa is None:
raise RuntimeError("librosa no está disponible para convertir la referencia a WAV")
wav_path = reference_path.with_suffix(".wav")
if wav_path.exists() and wav_path.stat().st_size > 0:
return wav_path
y, sr = librosa.load(str(reference_path), sr=44100, mono=False)
if y.ndim == 1:
y = y.reshape(1, -1)
sf.write(str(wav_path), y.T, sr, subtype="PCM_16")
return wav_path
def separate_stems(reference_wav: Path, output_dir: Path) -> Dict[str, Path]:
output_dir.mkdir(parents=True, exist_ok=True)
stem_root = output_dir / reference_wav.stem
expected = {
"reference": reference_wav,
"drums": stem_root / "drums.wav",
"bass": stem_root / "bass.wav",
"other": stem_root / "other.wav",
"vocals": stem_root / "vocals.wav",
}
if all(path.exists() and path.stat().st_size > 0 for path in expected.values()):
return expected
audio, sr = sf.read(str(reference_wav), always_2d=True)
if sr != 44100:
raise RuntimeError(f"Sample rate inesperado en referencia WAV: {sr}")
model = get_model("htdemucs")
model.cpu()
model.eval()
waveform = torch.tensor(audio.T, dtype=torch.float32)
separated = apply_model(model, waveform[None], device="cpu", progress=False)[0]
stem_root.mkdir(parents=True, exist_ok=True)
for stem_name, tensor in zip(model.sources, separated):
stem_path = stem_root / f"{stem_name}.wav"
sf.write(str(stem_path), tensor.detach().cpu().numpy().T, sr, subtype="PCM_16")
return expected
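# htdemucs separates four stems; model.sources is ("drums", "bass", "other",
# "vocals"), matching the `expected` paths written above.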
def _sections_for_structure(structure: str) -> List[Tuple[str, int]]:
return list(SECTION_BLUEPRINTS.get(structure.lower(), SECTION_BLUEPRINTS["standard"]))
def _create_track(client: AbletonSocketClient, name: str, color: int, volume: float) -> int:
response = client.send("create_track", {"type": "audio", "index": -1})
if response.get("status") != "success":
raise RuntimeError(response.get("message", f"No se pudo crear {name}"))
track_index = int(response.get("result", {}).get("index"))
client.send("set_track_name", {"index": track_index, "name": name})
client.send("set_track_color", {"index": track_index, "color": color})
client.send("set_track_volume", {"index": track_index, "volume": volume})
return track_index
def _import_full_length_audio(client: AbletonSocketClient, track_index: int, file_path: Path, name: str) -> None:
response = client.send("create_arrangement_audio_pattern", {
"track_index": track_index,
"file_path": str(file_path),
"positions": [0.0],
"name": name,
}, timeout=120.0)
if response.get("status") != "success":
raise RuntimeError(response.get("message", f"No se pudo importar {name}"))
def _prepare_navigation_scenes(client: AbletonSocketClient, structure: str) -> None:
sections = _sections_for_structure(structure)
session_info = client.send("get_session_info")
if session_info.get("status") != "success":
return
scene_count = int(session_info.get("result", {}).get("num_scenes", 0) or 0)
target_count = len(sections)
while scene_count < target_count:
create_response = client.send("create_scene", {"index": -1})
if create_response.get("status") != "success":
break
scene_count += 1
while scene_count > target_count and scene_count > 1:
delete_response = client.send("delete_scene", {"index": scene_count - 1})
if delete_response.get("status") != "success":
break
scene_count -= 1
for scene_index, (section_name, _) in enumerate(sections):
client.send("set_scene_name", {"index": scene_index, "name": section_name})
def rebuild_project_from_reference(reference_path: Path) -> Dict[str, Any]:
reference_path = reference_path.resolve()
if not reference_path.exists():
raise FileNotFoundError(reference_path)
profile = _resolve_reference_profile(reference_path)
reference_wav = ensure_reference_wav(reference_path)
stems = separate_stems(reference_wav, reference_path.parent / "stems")
client = AbletonSocketClient()
clear_response = client.send("clear_project", {"keep_tracks": 0}, timeout=120.0)
if clear_response.get("status") != "success":
raise RuntimeError(clear_response.get("message", "No se pudo limpiar el proyecto"))
client.send("stop", {})
client.send("set_tempo", {"tempo": round(profile["tempo"], 3)})
client.send("show_arrangement_view", {})
client.send("jump_to", {"time": 0})
created = []
for (track_name, color, volume, muted), stem_key in zip(TRACK_LAYOUT, ("reference", "drums", "bass", "other", "vocals")):
track_index = _create_track(client, track_name, color, volume)
_import_full_length_audio(client, track_index, stems[stem_key], track_name)
if muted:
client.send("set_track_mute", {"index": track_index, "mute": True})
created.append({
"track_index": track_index,
"name": track_name,
"file_path": str(stems[stem_key]),
})
_prepare_navigation_scenes(client, profile["structure"])
client.send("loop_selection", {"start": 0, "length": max(32.0, round(profile["duration"] * profile["tempo"] / 60.0, 3)), "enable": False})
client.send("jump_to", {"time": 0})
client.send("show_arrangement_view", {})
session_info = client.send("get_session_info")
return {
"reference": str(reference_path),
"tempo": profile["tempo"],
"key": profile["key"],
"structure": profile["structure"],
"listener_device": profile["listener_device"],
"stems": created,
"session_info": session_info.get("result", {}),
}
def main() -> int:
import argparse
parser = argparse.ArgumentParser(description="Rebuild an Ableton project directly from a reference track.")
parser.add_argument("reference_path", help="Absolute or relative path to the reference audio file")
args = parser.parse_args()
result = rebuild_project_from_reference(Path(args.reference_path))
print(json.dumps(result, indent=2, ensure_ascii=False))
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,13 +0,0 @@
# AbletonMCP-AI Server dependencies
# Install with: pip install -r requirements.txt
# FastMCP MCP server
mcp>=1.0.0
# Optional: advanced audio analysis
# numpy>=1.24.0
# librosa>=0.10.0
# Optional: AMD GPU processing
# torch==2.4.1
# torch-directml>=0.2.5

View File

@@ -1,525 +0,0 @@
"""
retrieval_benchmark.py - Offline benchmark harness for retrieval quality inspection.
Analyzes reference tracks and outputs top-N candidates per role to help spot
role contamination and evaluate retrieval quality.
Usage:
python retrieval_benchmark.py --reference "path/to/track.mp3"
python retrieval_benchmark.py --reference "track1.mp3" "track2.mp3" --top-n 10
python retrieval_benchmark.py --reference "track.mp3" --output results.json --format json
python retrieval_benchmark.py --reference "track.mp3" --output results.md --format markdown
"""
from __future__ import annotations
import argparse
import json
import logging
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Optional
# Add parent directory to path for imports when running as script
sys.path.insert(0, str(Path(__file__).parent))
from reference_listener import ReferenceAudioListener, ROLE_SEGMENT_SETTINGS
logger = logging.getLogger(__name__)
def _default_library_dir() -> Path:
"""Get the default library directory."""
return Path(__file__).resolve().parents[2] / "librerias" / "all_tracks"
def run_benchmark(
reference_paths: List[str],
library_dir: Path,
top_n: int = 10,
roles: Optional[List[str]] = None,
duration_limit: Optional[float] = None,
) -> Dict[str, Any]:
"""
Run retrieval benchmark on one or more reference tracks.
Args:
reference_paths: List of paths to reference audio files
library_dir: Path to the sample library
top_n: Number of top candidates to show per role
roles: Optional list of specific roles to analyze
        duration_limit: Optional duration limit for analysis (accepted for CLI
            parity; not yet applied inside this function)
Returns:
Dict containing benchmark results for each reference
"""
listener = ReferenceAudioListener(str(library_dir))
all_roles = list(ROLE_SEGMENT_SETTINGS.keys())
target_roles = [r for r in (roles or all_roles) if r in all_roles]
results = {
"benchmark_info": {
"library_dir": str(library_dir),
"top_n": top_n,
"roles": target_roles,
"timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"),
"device": listener.device_name,
},
"references": [],
}
for ref_path in reference_paths:
ref_path = Path(ref_path)
if not ref_path.exists():
logger.warning("Reference file not found: %s", ref_path)
continue
logger.info("Analyzing reference: %s", ref_path.name)
try:
start_time = time.time()
# Run match_assets to get candidates per role
match_result = listener.match_assets(str(ref_path))
reference_info = match_result.get("reference", {})
matches = match_result.get("matches", {})
elapsed = time.time() - start_time
ref_result = {
"file_name": ref_path.name,
"path": str(ref_path),
"analysis_time_seconds": round(elapsed, 2),
"reference_info": {
"tempo": reference_info.get("tempo"),
"key": reference_info.get("key"),
"duration": reference_info.get("duration"),
"rms_mean": reference_info.get("rms_mean"),
"onset_mean": reference_info.get("onset_mean"),
"spectral_centroid": reference_info.get("spectral_centroid"),
},
"sections": [
{
"kind": s.get("kind"),
"start": s.get("start"),
"end": s.get("end"),
"bars": s.get("bars"),
}
for s in match_result.get("reference_sections", [])
],
"role_candidates": {},
}
# Process each role
for role in target_roles:
role_matches = matches.get(role, [])
top_candidates = role_matches[:top_n]
ref_result["role_candidates"][role] = {
"total_available": len(role_matches),
"top_candidates": [
{
"rank": i + 1,
"file_name": c.get("file_name"),
"path": c.get("path"),
"score": c.get("score"),
"cosine": c.get("cosine"),
"segment_score": c.get("segment_score"),
"catalog_score": c.get("catalog_score"),
"tempo": c.get("tempo"),
"key": c.get("key"),
"duration": c.get("duration"),
}
for i, c in enumerate(top_candidates)
],
}
results["references"].append(ref_result)
logger.info("Completed analysis in %.2fs", elapsed)
except Exception as e:
logger.error("Failed to analyze %s: %s", ref_path, e, exc_info=True)
results["references"].append({
"file_name": ref_path.name,
"path": str(ref_path),
"error": str(e),
})
return results
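# A minimal sketch (invented file name): benchmark one reference and read back
# the top "kick" candidates from the returned structure.
#
#     results = run_benchmark(["ref.mp3"], _default_library_dir(), top_n=5)
#     kicks = results["references"][0]["role_candidates"]["kick"]["top_candidates"]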
def analyze_role_contamination(results: Dict[str, Any]) -> Dict[str, Any]:
"""
Analyze results for potential role contamination issues.
Returns a dict with contamination analysis:
- files appearing in multiple roles
- misnamed files (e.g., "bass" appearing in "kick" role)
- score distribution anomalies
"""
contamination = {
"cross_role_files": [],
"potential_mismatches": [],
"role_score_stats": {},
}
# Track files appearing in multiple roles
file_to_roles: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
for ref in results.get("references", []):
ref_name = ref.get("file_name", "unknown")
for role, role_data in ref.get("role_candidates", {}).items():
for candidate in role_data.get("top_candidates", []):
file_name = candidate.get("file_name", "")
if file_name:
file_to_roles[file_name].append({
"reference": ref_name,
"role": role,
"rank": candidate.get("rank"),
"score": candidate.get("score"),
})
# Find files appearing in multiple roles
for file_name, appearances in file_to_roles.items():
unique_roles = set(a["role"] for a in appearances)
if len(unique_roles) > 1:
contamination["cross_role_files"].append({
"file_name": file_name,
"roles": list(unique_roles),
"appearances": appearances,
})
# Check for potential mismatches (filename suggests different role)
role_keywords = {
"kick": ["kick"],
"snare": ["snare", "clap"],
"hat": ["hat", "hihat", "hi-hat"],
"bass_loop": ["bass", "sub", "808"],
"perc_loop": ["perc", "percussion", "conga", "bongo"],
"top_loop": ["top", "drum loop", "full drum"],
"synth_loop": ["synth", "lead", "pad", "chord", "arp"],
"vocal_loop": ["vocal", "vox", "acapella"],
"crash_fx": ["crash", "cymbal", "impact"],
"fill_fx": ["fill", "transition", "tom"],
"snare_roll": ["roll", "snareroll"],
"atmos_fx": ["atmos", "drone", "ambient", "texture"],
"vocal_shot": ["shot", "vocal shot", "chop"],
}
for ref in results.get("references", []):
for role, role_data in ref.get("role_candidates", {}).items():
for candidate in role_data.get("top_candidates", []):
file_name = candidate.get("file_name", "").lower()
if not file_name:
continue
# Check if file name suggests a different role
expected_keywords = role_keywords.get(role, [])
other_role_matches = []
for other_role, keywords in role_keywords.items():
if other_role == role:
continue
if any(kw in file_name for kw in keywords):
other_role_matches.append(other_role)
if other_role_matches and expected_keywords:
# File name matches another role but not this one
if not any(kw in file_name for kw in expected_keywords):
contamination["potential_mismatches"].append({
"file_name": candidate.get("file_name"),
"assigned_role": role,
"rank": candidate.get("rank"),
"score": candidate.get("score"),
"suggested_roles": other_role_matches,
})
# Calculate score distribution per role
for ref in results.get("references", []):
for role, role_data in ref.get("role_candidates", {}).items():
scores = [
c.get("score", 0)
for c in role_data.get("top_candidates", [])
if c.get("score") is not None
]
if scores:
contamination["role_score_stats"][role] = {
"min": round(min(scores), 4),
"max": round(max(scores), 4),
"avg": round(sum(scores) / len(scores), 4),
"count": len(scores),
}
return contamination
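# Illustrative outcome (invented names): a file ranked under both "bass_loop"
# and "top_loop" would land in cross_role_files, while a "kick"-role candidate
# named "snare_09.wav" would land in potential_mismatches with
# suggested_roles=["snare"].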
def format_output_json(results: Dict[str, Any]) -> str:
"""Format results as JSON string."""
return json.dumps(results, indent=2, ensure_ascii=False)
def format_output_markdown(results: Dict[str, Any]) -> str:
"""Format results as markdown string."""
lines = []
# Header
lines.append("# Retrieval Benchmark Report")
lines.append("")
lines.append(f"**Generated:** {results['benchmark_info']['timestamp']}")
lines.append(f"**Library:** `{results['benchmark_info']['library_dir']}`")
lines.append(f"**Top N:** {results['benchmark_info']['top_n']}")
lines.append(f"**Device:** {results['benchmark_info']['device']}")
lines.append("")
# Process each reference
for ref in results.get("references", []):
lines.append(f"## Reference: {ref.get('file_name', 'unknown')}")
lines.append("")
# Error case
if "error" in ref:
lines.append(f"**Error:** {ref['error']}")
lines.append("")
continue
# Reference info
ref_info = ref.get("reference_info", {})
lines.append("### Reference Analysis")
lines.append("")
lines.append("| Property | Value |")
lines.append("|----------|-------|")
lines.append(f"| Tempo | {ref_info.get('tempo', 'N/A')} BPM |")
lines.append(f"| Key | {ref_info.get('key', 'N/A')} |")
lines.append(f"| Duration | {ref_info.get('duration', 'N/A')}s |")
lines.append(f"| RMS Mean | {ref_info.get('rms_mean', 'N/A')} |")
lines.append(f"| Onset Mean | {ref_info.get('onset_mean', 'N/A')} |")
lines.append(f"| Spectral Centroid | {ref_info.get('spectral_centroid', 'N/A')} Hz |")
lines.append("")
# Sections
sections = ref.get("sections", [])
if sections:
lines.append("### Detected Sections")
lines.append("")
lines.append("| Type | Start | End | Bars |")
lines.append("|------|-------|-----|------|")
for s in sections:
lines.append(f"| {s.get('kind', 'N/A')} | {s.get('start', 'N/A')}s | {s.get('end', 'N/A')}s | {s.get('bars', 'N/A')} |")
lines.append("")
# Role candidates
lines.append("### Top Candidates per Role")
lines.append("")
for role, role_data in ref.get("role_candidates", {}).items():
total = role_data.get("total_available", 0)
lines.append(f"#### {role} ({total} available)")
lines.append("")
candidates = role_data.get("top_candidates", [])
if not candidates:
lines.append("*No candidates found*")
lines.append("")
continue
lines.append("| Rank | File | Score | Cosine | Seg | Catalog | Tempo | Key | Duration |")
lines.append("|------|------|-------|--------|-----|---------|-------|-----|----------|")
        for c in candidates:
            duration = c.get("duration")
            duration_text = f"{duration:.2f}s" if isinstance(duration, (int, float)) else "N/A"
            lines.append(
                f"| {c.get('rank', 'N/A')} | "
                f"`{str(c.get('file_name') or 'N/A')[:40]}` | "
                f"{c.get('score') or 0:.4f} | "
                f"{c.get('cosine') or 0:.4f} | "
                f"{c.get('segment_score') or 0:.4f} | "
                f"{c.get('catalog_score') or 0:.4f} | "
                f"{c.get('tempo', 'N/A')} | "
                f"{c.get('key', 'N/A')} | "
                f"{duration_text} |"  # guard: a missing duration would crash the :.2f spec
            )
lines.append("")
# Contamination analysis
if "contamination_analysis" in results:
contam = results["contamination_analysis"]
lines.append("## Role Contamination Analysis")
lines.append("")
# Cross-role files
cross_role = contam.get("cross_role_files", [])
if cross_role:
lines.append("### Files Appearing in Multiple Roles")
lines.append("")
for item in cross_role:
lines.append(f"- **{item['file_name']}**")
lines.append(f" - Roles: {', '.join(item['roles'])}")
for app in item["appearances"]:
lines.append(f" - {app['role']}: rank {app['rank']}, score {app['score']:.4f}")
lines.append("")
# Potential mismatches
mismatches = contam.get("potential_mismatches", [])
if mismatches:
lines.append("### Potential Role Mismatches")
lines.append("")
lines.append("Files whose names suggest a different role than assigned:")
lines.append("")
for item in mismatches:
lines.append(f"- **{item['file_name']}**")
lines.append(f" - Assigned: {item['assigned_role']} (rank {item['rank']}, score {item['score']:.4f})")
lines.append(f" - Suggested: {', '.join(item['suggested_roles'])}")
lines.append("")
# Score stats
score_stats = contam.get("role_score_stats", {})
if score_stats:
lines.append("### Score Distribution per Role")
lines.append("")
lines.append("| Role | Min | Max | Avg | Count |")
lines.append("|------|-----|-----|-----|-------|")
for role, stats in sorted(score_stats.items()):
lines.append(
f"| {role} | {stats['min']:.4f} | {stats['max']:.4f} | "
f"{stats['avg']:.4f} | {stats['count']} |"
)
lines.append("")
return "\n".join(lines)
def main() -> int:
parser = argparse.ArgumentParser(
description="Offline benchmark harness for retrieval quality inspection.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
%(prog)s --reference "track.mp3"
%(prog)s --reference "track1.mp3" "track2.mp3" --top-n 15
%(prog)s --reference "track.mp3" --output results.md --format markdown
%(prog)s --reference "track.mp3" --roles kick snare hat --top-n 20
""",
)
parser.add_argument(
"--reference", "-r",
nargs="+",
required=True,
help="One or more reference audio files to analyze",
)
parser.add_argument(
"--library-dir",
default=str(_default_library_dir()),
help="Audio library directory (default: ../librerias/all_tracks)",
)
parser.add_argument(
"--top-n", "-n",
type=int,
default=10,
help="Number of top candidates to show per role (default: 10)",
)
parser.add_argument(
"--roles",
nargs="*",
default=None,
help="Specific roles to analyze (default: all roles)",
)
parser.add_argument(
"--output", "-o",
type=str,
default=None,
help="Output file path for results",
)
parser.add_argument(
"--format", "-f",
choices=["json", "markdown", "md"],
default=None,
help="Output format (json or markdown). Auto-detected from output file extension if not specified.",
)
parser.add_argument(
"--analyze-contamination",
action="store_true",
help="Include role contamination analysis in output",
)
parser.add_argument(
"--verbose", "-v",
action="store_true",
help="Enable verbose logging",
)
parser.add_argument(
"--duration-limit",
type=float,
default=None,
help="Optional duration limit for audio analysis",
)
args = parser.parse_args()
# Configure logging
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(message)s")
else:
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
# Validate reference files
reference_paths = []
for ref in args.reference:
ref_path = Path(ref)
if ref_path.exists():
reference_paths.append(str(ref_path))
else:
logger.warning("Reference file not found: %s", ref)
if not reference_paths:
logger.error("No valid reference files provided")
return 1
# Run benchmark
logger.info("Running retrieval benchmark on %d reference(s)", len(reference_paths))
results = run_benchmark(
reference_paths=reference_paths,
library_dir=Path(args.library_dir),
top_n=args.top_n,
roles=args.roles,
duration_limit=args.duration_limit,
)
# Add contamination analysis if requested
if args.analyze_contamination:
logger.info("Analyzing role contamination...")
results["contamination_analysis"] = analyze_role_contamination(results)
# Determine output format
output_format = args.format
if output_format is None and args.output:
output_format = "markdown" if args.output.endswith(".md") else "json"
output_format = output_format or "text"
# Format output
if output_format in ("markdown", "md"):
output_text = format_output_markdown(results)
elif output_format == "json":
output_text = format_output_json(results)
    else:
        # Fallback "text" mode reuses the markdown formatter as a readable summary
        output_text = format_output_markdown(results)
# Write to file or stdout
if args.output:
output_path = Path(args.output)
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(output_text, encoding="utf-8")
logger.info("Results written to: %s", output_path)
else:
print(output_text)
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -1,469 +0,0 @@
"""
role_matcher.py - Phase 4: Role validation and sample matching utilities
This module provides enhanced role matching for sample selection with:
- Role validation based on audio characteristics
- Aggressive sample detection and filtering
- Logging of matching decisions
- Integration with reference_listener and sample_selector
"""
import logging
from typing import Any, Dict, List, Optional
logger = logging.getLogger("RoleMatcher")
# ============================================================================
# CONSTANTS
# ============================================================================
# Valid roles for sample matching with their expected characteristics
VALID_ROLES = {
# One-shot drums
"kick": {"max_duration": 2.0, "min_onset": 0.3, "is_loop": False, "bus": "drums"},
"snare": {"max_duration": 2.0, "min_onset": 0.25, "is_loop": False, "bus": "drums"},
"hat": {"max_duration": 1.5, "min_onset": 0.2, "is_loop": False, "bus": "drums"},
"clap": {"max_duration": 2.0, "min_onset": 0.25, "is_loop": False, "bus": "drums"},
"ride": {"max_duration": 3.0, "min_onset": 0.15, "is_loop": False, "bus": "drums"},
"perc": {"max_duration": 2.5, "min_onset": 0.2, "is_loop": False, "bus": "drums"},
# Loops
"bass_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "bass"},
"perc_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "drums"},
"top_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "drums"},
"synth_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "music"},
"vocal_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "vocal"},
# FX
"crash_fx": {"max_duration": 4.0, "is_loop": False, "bus": "fx"},
"fill_fx": {"max_duration": 8.0, "is_loop": False, "bus": "fx"},
"snare_roll": {"max_duration": 8.0, "is_loop": False, "bus": "drums"},
"atmos_fx": {"min_duration": 4.0, "is_loop": True, "bus": "fx"},
"vocal_shot": {"max_duration": 3.0, "is_loop": False, "bus": "vocal"},
# Resample layers
"resample_reverse": {"is_loop": False, "bus": "fx"},
"resample_riser": {"is_loop": False, "bus": "fx"},
"resample_downlifter": {"is_loop": False, "bus": "fx"},
"resample_stutter": {"is_loop": False, "bus": "vocal"},
}
# Keywords that indicate aggressive/hard samples that may be misclassified
AGGRESSIVE_KEYWORDS = {
# Very aggressive kick patterns
"hard", "distorted", "industrial", "slam", "punch", "brutal",
# Potentially misclassified
"subdrop", "impact", "explosion", "destroy",
}
# Keywords that are acceptable for aggressive genres
GENRE_APPROPRIATE_AGGRESSIVE = {
"industrial-techno", "hard-techno", "raw-techno", "psytrance", "dark-techno"
}
# Role aliases for flexible matching
ROLE_ALIASES = {
"kick": ["kick", "bd", "bassdrum", "bass_drum"],
"snare": ["snare", "sd", "snr"],
"clap": ["clap", "cp", "handclap"],
"hat": ["hat", "hihat", "hi_hat", "hhat", "closed_hat", "hat_closed"],
"hat_open": ["open_hat", "hat_open", "ohat", "openhihat"],
"ride": ["ride", "rd", "cymbal"],
"perc": ["perc", "percussion", "percs"],
"bass_loop": ["bass_loop", "bassloop", "bass loop", "sub_bass"],
"perc_loop": ["perc_loop", "percloop", "percussion loop", "perc loop"],
"top_loop": ["top_loop", "toploop", "top loop", "full_drum"],
"synth_loop": ["synth_loop", "synthloop", "synth loop", "chord_loop", "stab"],
"vocal_loop": ["vocal_loop", "vocalloop", "vocal loop", "vox_loop", "vox"],
"crash_fx": ["crash", "crash_fx", "crashfx", "impact_fx"],
"fill_fx": ["fill", "fill_fx", "fillfx", "tom_fill", "transition"],
"snare_roll": ["snare_roll", "snareroll", "snare roll", "snr_roll"],
"atmos_fx": ["atmos", "atmos_fx", "atmosfx", "drone", "pad_fx"],
"vocal_shot": ["vocal_shot", "vocalshot", "vocal shot", "vocal_one_shot"],
}
# Minimum score thresholds for role matching
ROLE_SCORE_THRESHOLDS = {
"kick": 0.35,
"snare": 0.32,
"hat": 0.30,
"clap": 0.32,
"bass_loop": 0.38,
"perc_loop": 0.35,
"top_loop": 0.35,
"synth_loop": 0.36,
"vocal_loop": 0.38,
"crash_fx": 0.30,
"fill_fx": 0.32,
"snare_roll": 0.30,
"atmos_fx": 0.32,
"vocal_shot": 0.34,
}
# ============================================================================
# VALIDATION FUNCTIONS
# ============================================================================
def validate_role_for_sample(
role: str,
sample_data: Dict[str, Any],
genre: Optional[str] = None,
) -> Dict[str, Any]:
"""
Validates if a sample is appropriate for a given role.
Args:
role: The role to validate for (e.g., 'kick', 'bass_loop')
sample_data: Sample metadata with keys like 'duration', 'onset_mean', 'file_name', 'rms_mean'
genre: Optional genre for context-aware aggressive sample handling
Returns:
Dict with keys:
- 'valid' (bool): Whether the sample passes validation
- 'score' (float): Raw validation score (0.0-1.0)
- 'warnings' (list): List of warning messages
- 'adjusted_score' (float): Score after penalties
"""
if role not in VALID_ROLES:
return {"valid": True, "score": 0.5, "warnings": [f"Unknown role: {role}"], "adjusted_score": 0.5}
role_config = VALID_ROLES[role]
warnings: List[str] = []
score = 1.0
duration = float(sample_data.get("duration", 0.0) or 0.0)
onset = float(sample_data.get("onset_mean", 0.0) or 0.0)
file_name = str(sample_data.get("file_name", "") or "").lower()
rms = float(sample_data.get("rms_mean", 0.0) or 0.0)
# Duration validation
if role_config.get("is_loop"):
min_dur = role_config.get("min_duration", 2.0)
max_dur = role_config.get("max_duration", 16.0)
if duration < min_dur:
warnings.append(f"Duration {duration:.1f}s too short for loop role (min {min_dur}s)")
score *= 0.7
elif max_dur and duration > max_dur:
warnings.append(f"Duration {duration:.1f}s too long for role (max {max_dur}s)")
score *= 0.85
else:
max_dur = role_config.get("max_duration", 3.0)
if duration > max_dur:
warnings.append(f"Duration {duration:.1f}s too long for one-shot role (max {max_dur}s)")
score *= 0.75
if "loop" in file_name and role in ["kick", "snare", "hat", "clap"]:
warnings.append("One-shot role has 'loop' in filename")
score *= 0.65
# Onset validation for percussive elements
min_onset = role_config.get("min_onset", 0.0)
if min_onset > 0 and onset < min_onset:
warnings.append(f"Onset {onset:.2f} below minimum {min_onset:.2f}")
score *= 0.85
# Check for aggressive samples that might be misclassified
aggressive_penalty = 1.0
is_aggressive_genre = genre and genre.lower() in GENRE_APPROPRIATE_AGGRESSIVE
for keyword in AGGRESSIVE_KEYWORDS:
if keyword in file_name:
if not is_aggressive_genre:
aggressive_penalty *= 0.88
warnings.append(f"Aggressive keyword '{keyword}' found for non-aggressive genre")
score *= aggressive_penalty
# RMS validation for certain roles
if role in ["kick", "snare", "clap"] and rms > 0.4:
warnings.append(f"High RMS {rms:.3f} for one-shot role")
score *= 0.9
adjusted_score = max(0.1, min(1.0, score))
return {
"valid": score >= 0.4,
"score": score,
"warnings": warnings,
"adjusted_score": adjusted_score,
}
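# Example (assumed metadata, not from the real library): a short percussive
# one-shot passes the "kick" checks, while an 8 s file would trip the
# max_duration penalty.
#
#     validate_role_for_sample("kick", {"duration": 0.6, "onset_mean": 0.45,
#                                       "file_name": "kick_01.wav", "rms_mean": 0.2})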
def resolve_role_from_alias(alias: str) -> Optional[str]:
"""
Resolves a role name from various aliases.
Args:
alias: A potential role alias (e.g., 'bd', 'hihat', 'bass loop')
Returns:
The canonical role name or None if not found
"""
alias_lower = alias.lower().strip().replace("-", "_").replace(" ", "_")
# Direct match
if alias_lower in VALID_ROLES:
return alias_lower
# Check aliases
for role, aliases in ROLE_ALIASES.items():
normalized_aliases = [a.lower().replace("-", "_").replace(" ", "_") for a in aliases]
if alias_lower in normalized_aliases:
return role
return None
def get_bus_for_role(role: str) -> str:
"""
Gets the appropriate bus for a role.
Args:
role: The role name
Returns:
Bus name ('drums', 'bass', 'music', 'vocal', or 'fx')
"""
if role in VALID_ROLES:
return VALID_ROLES[role].get("bus", "music")
return "music"
# ============================================================================
# LOGGING FUNCTIONS
# ============================================================================
def log_matching_decision(
role: str,
selected_sample: Optional[Dict[str, Any]],
candidates_count: int,
final_score: float,
validation_result: Optional[Dict[str, Any]] = None,
) -> None:
"""
Logs detailed matching decisions for debugging and analysis.
Args:
role: The role being matched
selected_sample: The selected sample dict or None
candidates_count: Number of candidates considered
final_score: The final matching score
validation_result: Optional validation result dict
"""
if not selected_sample:
logger.info(
f"[MATCH] Role '{role}': No sample selected (0/{candidates_count} candidates)"
)
return
sample_name = selected_sample.get("file_name", "unknown")
sample_tempo = selected_sample.get("tempo", 0.0)
sample_key = selected_sample.get("key", "N/A")
sample_dur = selected_sample.get("duration", 0.0)
log_parts = [
f"[MATCH] Role '{role}':",
f"Sample: {sample_name}",
f"Score: {final_score:.3f}",
f"Tempo: {sample_tempo:.1f}",
f"Key: {sample_key}",
f"Duration: {sample_dur:.1f}s",
f"Candidates: {candidates_count}",
]
if validation_result:
warnings = validation_result.get("warnings", [])
if warnings:
log_parts.append(f"Warnings: {', '.join(warnings)}")
log_parts.append(f"Validated: {validation_result.get('valid', True)}")
logger.info(" | ".join(log_parts))
# ============================================================================
# ENHANCEMENT FUNCTIONS
# ============================================================================
def enhance_sample_matching(
matches: Dict[str, List[Dict[str, Any]]],
reference: Dict[str, Any],
genre: Optional[str] = None,
) -> Dict[str, List[Dict[str, Any]]]:
"""
Enhances sample matching results with validation and filtering.
This function takes raw matches from reference_listener and applies:
1. Role validation based on audio characteristics
2. Aggressive sample filtering
3. Score adjustment based on validation results
Args:
matches: Raw matches from reference_listener (role -> list of sample dicts)
reference: Reference track analysis data
genre: Target genre for context-aware filtering
Returns:
Enhanced matches with validation scores and filtering applied
"""
enhanced: Dict[str, List[Dict[str, Any]]] = {}
for role, candidates in matches.items():
if not candidates:
enhanced[role] = []
continue
threshold = ROLE_SCORE_THRESHOLDS.get(role, 0.30)
enhanced_candidates: List[Dict[str, Any]] = []
for candidate in candidates:
# Create a copy to avoid modifying the original
enhanced_candidate = dict(candidate)
# Validate the sample for this role
validation = validate_role_for_sample(role, candidate, genre)
enhanced_candidate["validation"] = validation
# Apply validation penalty to the score
original_score = float(candidate.get("score", 0.0))
adjusted_score = original_score * validation["adjusted_score"]
enhanced_candidate["adjusted_score"] = round(adjusted_score, 6)
# Filter out samples below threshold
if adjusted_score >= threshold:
enhanced_candidates.append(enhanced_candidate)
else:
logger.debug(
f"[FILTER] Role '{role}': Filtered out '{candidate.get('file_name', 'unknown')}' "
f"(score {adjusted_score:.3f} < threshold {threshold})"
)
# Re-sort by adjusted score
enhanced_candidates.sort(key=lambda x: float(x.get("adjusted_score", 0.0)), reverse=True)
enhanced[role] = enhanced_candidates
# Log summary
filtered_count = len(candidates) - len(enhanced_candidates)
if filtered_count > 0:
logger.info(
f"[ENHANCE] Role '{role}': {len(enhanced_candidates)}/{len(candidates)} candidates passed validation "
f"({filtered_count} filtered out)"
)
return enhanced
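# Sketch (hypothetical candidate dict): raw listener matches are re-scored and
# anything whose adjusted score drops below the role threshold is filtered out.
#
#     raw = {"kick": [{"file_name": "kick_01.wav", "score": 0.52,
#                      "duration": 0.5, "onset_mean": 0.45, "rms_mean": 0.2}]}
#     enhanced = enhance_sample_matching(raw, reference={}, genre="techno")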
def filter_aggressive_samples(
candidates: List[Dict[str, Any]],
genre: Optional[str] = None,
strict: bool = False,
) -> List[Dict[str, Any]]:
"""
Filters out samples with aggressive keywords unless appropriate for the genre.
Args:
candidates: List of sample candidate dicts
genre: Target genre
strict: If True, apply stricter filtering
Returns:
Filtered list of candidates
"""
is_aggressive_genre = genre and genre.lower() in GENRE_APPROPRIATE_AGGRESSIVE
if is_aggressive_genre:
# For aggressive genres, don't filter aggressive samples
return candidates
filtered = []
for candidate in candidates:
file_name = str(candidate.get("file_name", "") or "").lower()
aggressive_count = sum(1 for kw in AGGRESSIVE_KEYWORDS if kw in file_name)
if strict and aggressive_count > 0:
continue
# Apply penalty instead of filtering completely
if aggressive_count > 0:
penalty = 0.85 ** aggressive_count
candidate_copy = dict(candidate)
original_score = float(candidate.get("score", 0.0))
candidate_copy["score"] = original_score * penalty
filtered.append(candidate_copy)
else:
filtered.append(candidate)
return filtered
# ============================================================================
# INTEGRATION HELPERS
# ============================================================================
def create_enhanced_match_report(
role: str,
selected_sample: Optional[Dict[str, Any]],
all_candidates: List[Dict[str, Any]],
validation_result: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""
Creates a detailed report for a matching decision.
Args:
role: The role being matched
selected_sample: The selected sample
all_candidates: All candidates that were considered
validation_result: Validation result for the selected sample
Returns:
A dict with detailed matching report
"""
report = {
"role": role,
"selected": selected_sample is not None,
"candidates_count": len(all_candidates),
"threshold": ROLE_SCORE_THRESHOLDS.get(role, 0.30),
}
if selected_sample:
report["selected_sample"] = {
"name": selected_sample.get("file_name"),
"path": selected_sample.get("path"),
"score": selected_sample.get("score"),
"adjusted_score": selected_sample.get("adjusted_score"),
"tempo": selected_sample.get("tempo"),
"key": selected_sample.get("key"),
"duration": selected_sample.get("duration"),
}
if validation_result:
report["validation"] = {
"valid": validation_result.get("valid"),
"score": validation_result.get("score"),
"warnings": validation_result.get("warnings", []),
}
return report
def get_role_info(role: str) -> Dict[str, Any]:
"""
Gets comprehensive information about a role.
Args:
role: The role name
Returns:
Dict with role information including valid samples count, thresholds, etc.
"""
if role not in VALID_ROLES:
return {"error": f"Unknown role: {role}"}
config = VALID_ROLES[role]
aliases = ROLE_ALIASES.get(role, [])
return {
"role": role,
"config": config,
"aliases": aliases,
"threshold": ROLE_SCORE_THRESHOLDS.get(role, 0.30),
"bus": config.get("bus", "music"),
"is_loop": config.get("is_loop", False),
}
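# Minimal self-check with assumed metadata values (not from the real library):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    demo = {"duration": 0.6, "onset_mean": 0.45, "file_name": "hard_kick_01.wav", "rms_mean": 0.2}
    print(validate_role_for_sample("kick", demo, genre="deep-house"))
    print(resolve_role_from_alias("bd"), get_bus_for_role("bass_loop"))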

View File

@@ -1,308 +0,0 @@
"""
sample_index.py - Sample index and search for AbletonMCP-AI
Manages the local sample library with metadata extracted from file names.
"""
import json
import logging
from pathlib import Path
from typing import List, Dict, Any, Optional
import re
logger = logging.getLogger("SampleIndex")
class SampleIndex:
"""Índice de samples con búsqueda y metadatos"""
# Categorías por palabras clave
CATEGORIES = {
'kick': ['kick', 'bd', 'bass drum', 'kick drum'],
'snare': ['snare', 'sd', 'snr'],
'clap': ['clap', 'clp'],
'hat': ['hat', 'hh', 'hihat', 'hi-hat', 'closed hat', 'open hat'],
'perc': ['perc', 'percussion', 'conga', 'bongo', 'shaker', 'tamb', 'timb'],
        'bass': ['bass', 'bassline', 'sub', '808', 'reese'],  # lowercase so it can match the lowercased name
'synth': ['synth', 'lead', 'pad', 'arp', 'pluck', 'stab', 'chord'],
'vocal': ['vocal', 'vox', 'voice', 'speech', 'talk'],
'fx': ['fx', 'effect', 'sweep', 'riser', 'downlifter', 'impact', 'hit'],
'loop': ['loop', 'full', 'groove'],
}
def __init__(self, base_dir: str):
"""
Inicializa el índice de samples
Args:
base_dir: Directorio base donde buscar samples
"""
self.base_dir = Path(base_dir)
self.samples: List[Dict[str, Any]] = []
self.index_file = self.base_dir / ".sample_index.json"
# Cargar o construir índice
if self.index_file.exists():
self._load_index()
else:
self._build_index()
self._save_index()
def _build_index(self):
"""Construye el índice escaneando el directorio"""
logger.info(f"Construyendo índice de samples en: {self.base_dir}")
extensions = {'.wav', '.aif', '.aiff', '.mp3', '.ogg'}
for file_path in self.base_dir.rglob('*'):
if file_path.suffix.lower() in extensions:
sample_info = self._analyze_sample(file_path)
self.samples.append(sample_info)
logger.info(f"Índice construido: {len(self.samples)} samples encontrados")
def _analyze_sample(self, file_path: Path) -> Dict[str, Any]:
"""Analiza un sample y extrae metadatos del nombre"""
name = file_path.stem
name_lower = name.lower()
# Determinar categoría
category = self._detect_category(name_lower)
# Extraer key del nombre
key = self._extract_key(name)
# Extraer BPM del nombre
bpm = self._extract_bpm(name)
return {
'name': name,
'path': str(file_path),
'category': category,
'key': key,
'bpm': bpm,
'size': file_path.stat().st_size if file_path.exists() else 0,
}
def _detect_category(self, name: str) -> str:
"""Detecta la categoría basada en palabras clave"""
for category, keywords in self.CATEGORIES.items():
for keyword in keywords:
if keyword in name:
return category
return 'unknown'
def _extract_key(self, name: str) -> Optional[str]:
"""Extrae la tonalidad del nombre del archivo"""
# Patrones comunes: "Key A", "in A", "A minor", "Am", "F#m", etc.
patterns = [
r'[_\s\-]([A-G][#b]?m?)\s*(?:minor|major)?[_\s\-]?',
r'[_\s\-]([A-G][#b]?)[_\s\-]',
r'\bin\s+([A-G][#b]?m?)\b',
r'Key\s+([A-G][#b]?m?)',
]
for pattern in patterns:
match = re.search(pattern, name, re.IGNORECASE)
if match:
key = match.group(1)
                # Normalize flats to their enharmonic sharps
                flat_to_sharp = {'Db': 'C#', 'Eb': 'D#', 'Gb': 'F#', 'Ab': 'G#', 'Bb': 'A#'}
                for flat, sharp in flat_to_sharp.items():
                    if key.startswith(flat):
                        key = sharp + key[len(flat):]
                        break
return key
return None
def _extract_bpm(self, name: str) -> Optional[int]:
"""Extrae el BPM del nombre del archivo"""
# Patrones: "128 BPM", "_128_", "128bpm", etc.
patterns = [
r'[_\s\-](\d{2,3})\s*BPM',
r'[_\s\-](\d{2,3})[_\s\-]',
r'(\d{2,3})bpm',
]
for pattern in patterns:
match = re.search(pattern, name, re.IGNORECASE)
if match:
bpm = int(match.group(1))
                    if 60 <= bpm <= 200:  # Reasonable range
return bpm
return None
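    # Examples (invented names): "Groove_128_Am.wav" -> 128, "top 174bpm.wav" -> 174,
    # "take 2.wav" -> None (no plausible BPM token).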
def _load_index(self):
"""Carga el índice desde archivo"""
try:
with open(self.index_file, 'r') as f:
data = json.load(f)
self.samples = data.get('samples', [])
logger.info(f"Índice cargado: {len(self.samples)} samples")
except Exception as e:
logger.error(f"Error cargando índice: {e}")
self._build_index()
def _save_index(self):
"""Guarda el índice a archivo"""
try:
with open(self.index_file, 'w') as f:
json.dump({
'samples': self.samples,
'base_dir': str(self.base_dir)
}, f, indent=2)
logger.info(f"Índice guardado en: {self.index_file}")
except Exception as e:
logger.error(f"Error guardando índice: {e}")
def search(self, query: str, category: str = "", limit: int = 10) -> List[Dict[str, Any]]:
"""
Busca samples por query y/o categoría
Args:
query: Término de búsqueda
category: Categoría específica (opcional)
limit: Número máximo de resultados
Returns:
Lista de samples que coinciden
"""
query_lower = query.lower()
results = []
for sample in self.samples:
            # Skip when a category filter was given and does not match
            if category and sample['category'] != category.lower():
                continue
            # Search within the name
            name = sample['name'].lower()
            if query_lower in name:
                # Compute a relevance score
                score = 0
                if query_lower == sample.get('category', ''):
                    score += 10  # Exact category match
                if query_lower in name.split('_'):
                    score += 5  # Whole-word match
                if name.startswith(query_lower):
                    score += 3  # Starts with the search term
                results.append((score, sample))
        # Sort by score and trim to the limit
results.sort(key=lambda x: x[0], reverse=True)
return [sample for _, sample in results[:limit]]
def find_by_key(self, key: str, category: str = "", limit: int = 10) -> List[Dict[str, Any]]:
"""Busca samples por tonalidad"""
results = []
for sample in self.samples:
if sample.get('key') == key:
if not category or sample['category'] == category:
results.append(sample)
return results[:limit]
def find_by_bpm(self, bpm: int, tolerance: int = 5, limit: int = 10) -> List[Dict[str, Any]]:
"""Busca samples por BPM con tolerancia"""
results = []
for sample in self.samples:
sample_bpm = sample.get('bpm')
if sample_bpm and abs(sample_bpm - bpm) <= tolerance:
results.append(sample)
return results[:limit]
def get_random_sample(self, category: str = "") -> Optional[Dict[str, Any]]:
"""Obtiene un sample aleatorio, opcionalmente filtrado por categoría"""
import random
samples = self.samples
if category:
samples = [s for s in samples if s['category'] == category]
return random.choice(samples) if samples else None
def get_sample_pack(self, genre: str, key: str = "", bpm: int = 0) -> Dict[str, List[Dict]]:
"""
Obtiene un pack de samples completo para un género
Args:
genre: Género musical
key: Tonalidad preferida
bpm: BPM preferido
Returns:
Dict con samples organizados por categoría
"""
pack = {
'kick': [],
'snare': [],
'hat': [],
'clap': [],
'perc': [],
'bass': [],
'synth': [],
'fx': [],
}
        # Pick candidate samples for each category
        import random
        for category in pack.keys():
            candidates = [s for s in self.samples if s['category'] == category]
            # Filter by key if one was given
            if key and candidates:
                key_matches = [s for s in candidates if s.get('key') == key]
                if key_matches:
                    candidates = key_matches
            # Filter by BPM if one was given
            if bpm and candidates:
                bpm_matches = [s for s in candidates if s.get('bpm')]
                if bpm_matches:
                    # Sort by distance to the target BPM
                    bpm_matches.sort(key=lambda s: abs(s['bpm'] - bpm))
                    candidates = bpm_matches[:5]  # Keep the 5 closest
            # Select up to 3 samples (import hoisted out of the loop)
            if candidates:
                pack[category] = random.sample(candidates, min(3, len(candidates)))
return pack
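    # Usage sketch (hypothetical library path):
    #
    #     index = SampleIndex(r"C:\samples")
    #     pack = index.get_sample_pack("techno", key="Am", bpm=128)
    #     print(len(pack['kick']), "kick candidates")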
def refresh(self):
"""Reconstruye el índice desde cero"""
logger.info("Refrescando índice...")
self._build_index()
self._save_index()
# Utility entry point for quick testing
if __name__ == "__main__":
    import sys
    if len(sys.argv) < 2:
        print("Usage: python sample_index.py <samples_directory>")
        sys.exit(1)
    logging.basicConfig(level=logging.INFO)
    index = SampleIndex(sys.argv[1])
    print(f"\nIndex loaded: {len(index.samples)} samples")
    print("\nDistribution by category:")
    categories = {}
    for sample in index.samples:
        cat = sample['category']
        categories[cat] = categories.get(cat, 0) + 1
    for cat, count in sorted(categories.items(), key=lambda x: -x[1]):
        print(f"  {cat}: {count}")
    # Example search
    print("\nSearch 'kick':")
    for s in index.search("kick", limit=5):
        print(f"  - {s['name']} ({s.get('key', '?')}, {s.get('bpm', '?')} BPM)")

File diff suppressed because it is too large

View File

@@ -1,244 +0,0 @@
"""
Demo of the AbletonMCP-AI Sample Management System
This script demonstrates the capabilities of the complete sample system.
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from sample_manager import get_manager
from sample_selector import get_selector
from audio_analyzer import analyze_sample, AudioAnalyzer
def demo_analyzer():
"""Demostración del analizador de audio"""
print("=" * 60)
print("DEMO: Audio Analyzer")
print("=" * 60)
AudioAnalyzer(backend='basic')
# Analizar un archivo de ejemplo
test_file = r"C:\Users\ren\embeddings\all_tracks\BBH - Primer Impacto - Kick 1.wav"
print(f"\nAnalizando: {Path(test_file).name}")
print("-" * 40)
try:
result = analyze_sample(test_file)
print(f"Tipo detectado: {result['sample_type']}")
print(f"BPM: {result.get('bpm') or 'No detectado'}")
print(f"Key: {result.get('key') or 'No detectado'}")
print(f"Duración: {result['duration']:.3f}s")
print(f"Es percusivo: {result['is_percussive']}")
print(f"Géneros sugeridos: {', '.join(result['suggested_genres'])}")
except Exception as e:
print(f"Error: {e}")
print()
def demo_manager():
"""Demostración del gestor de samples"""
print("=" * 60)
print("DEMO: Sample Manager")
print("=" * 60)
manager = get_manager(r"C:\Users\ren\embeddings\all_tracks")
# Escanear librería
print("\nEscaneando librería...")
stats = manager.scan_directory()
print(f" Samples procesados: {stats['processed']}")
print(f" Nuevos: {stats['added']}")
print(f" Total en librería: {stats['total_samples']}")
# Estadísticas
print("\nEstadísticas:")
stats = manager.get_stats()
print(f" Total: {stats['total_samples']} samples")
print(f" Tamaño: {stats['total_size'] / (1024**2):.1f} MB")
if stats['by_category']:
print("\n Por categoría:")
for cat, count in sorted(stats['by_category'].items(), key=lambda x: -x[1]):
print(f" {cat}: {count}")
if stats['by_key']:
print("\n Por key:")
for key, count in sorted(stats['by_key'].items(), key=lambda x: -x[1]):
print(f" {key}: {count}")
# Búsquedas
print("\nBúsquedas:")
print("-" * 40)
# Buscar kicks
kicks = manager.search(sample_type="kick", limit=3)
print(f"\nKicks encontrados: {len(kicks)}")
for s in kicks:
print(f" - {s.name}")
# Buscar por key
g_sharp = manager.search(key="G#m", limit=3)
print(f"\nSamples en G#m: {len(g_sharp)}")
for s in g_sharp:
print(f" - {s.name} ({s.sample_type})")
# Buscar por BPM
bpm_128 = manager.search(bpm=128, bpm_tolerance=5, limit=3)
print(f"\nSamples ~128 BPM: {len(bpm_128)}")
for s in bpm_128:
key_info = f" [{s.key}]" if s.key else ""
print(f" - {s.name}{key_info}")
print()
def demo_selector():
"""Demostración del selector inteligente"""
print("=" * 60)
print("DEMO: Sample Selector")
print("=" * 60)
selector = get_selector()
    # Select packs for several genres
genres = ['techno', 'house', 'tech-house']
for genre in genres:
print(f"\n{genre.upper()}:")
print("-" * 40)
group = selector.select_for_genre(genre, key='Am', bpm=128)
print(f" Key: {group.key} | BPM: {group.bpm}")
# Drum kit
kit = group.drums
print("\n Drum Kit:")
if kit.kick:
print(f" Kick: {kit.kick.name}")
if kit.snare:
print(f" Snare: {kit.snare.name}")
if kit.clap:
print(f" Clap: {kit.clap.name}")
if kit.hat_closed:
print(f" Hat: {kit.hat_closed.name}")
        # MIDI mapping
        mapping = selector.get_midi_mapping_for_kit(kit)
        print("\n  MIDI Mapping:")
for note, info in sorted(mapping['notes'].items())[:4]:
if info['sample']:
print(f" Note {note}: {info['sample'][:40]}...")
# Bass
if group.bass:
print(f"\n Bass ({len(group.bass)}):")
for s in group.bass[:2]:
key_info = f" [{s.key}]" if s.key else ""
print(f" - {s.name}{key_info}")
    # Key changes
    print("\n" + "-" * 40)
    print("Suggested Key Changes (from Am):")
changes = ['fifth_up', 'fifth_down', 'relative', 'parallel']
for change in changes:
new_key = selector.suggest_key_change('Am', change)
print(f" {change}: {new_key}")
print()
def demo_compatibility():
"""Demostración de búsqueda de samples compatibles"""
print("=" * 60)
print("DEMO: Compatibilidad de Samples")
print("=" * 60)
manager = get_manager()
selector = get_selector()
# Encontrar un sample con key para usar de referencia
samples_with_key = manager.search(key="G#m", limit=1)
if samples_with_key:
reference = samples_with_key[0]
print(f"\nSample de referencia: {reference.name}")
print(f" Key: {reference.key} | BPM: {reference.bpm}")
# Buscar compatibles
compatible = selector.find_compatible_samples(reference, max_results=5)
print("\nSamples compatibles:")
print("-" * 40)
for sample, score in compatible:
bar_len = int(score * 20)
bar = "" * bar_len + "" * (20 - bar_len)
print(f" [{bar}] {score:.1%} - {sample.name}")
print()
def demo_pack_generation():
"""Demostración de generación de packs"""
print("=" * 60)
print("DEMO: Generación de Sample Packs")
print("=" * 60)
manager = get_manager()
genres = ['techno', 'house', 'deep-house']
for genre in genres:
print(f"\n{genre.upper()} Pack:")
print("-" * 40)
pack = manager.get_pack_for_genre(genre, key='Am', bpm=128)
total = 0
for category, samples in pack.items():
if samples:
count = len(samples)
total += count
print(f" {category}: {count}")
print(f" Total: {total} samples")
print()
def main():
"""Ejecutar todas las demos"""
print("\n")
print("=" * 60)
print(" AbletonMCP-AI Sample System Demo ".center(60))
print("=" * 60)
print()
try:
demo_analyzer()
demo_manager()
demo_selector()
demo_compatibility()
demo_pack_generation()
print("=" * 60)
print("Todas las demos completadas exitosamente!")
print("=" * 60)
except Exception as e:
print(f"\nError en demo: {e}")
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()

View File

@@ -1,198 +0,0 @@
"""
segment_rag_builder.py - Build or refresh the persistent segment-audio index.
"""
from __future__ import annotations
import argparse
import json
import logging
from pathlib import Path
from reference_listener import ReferenceAudioListener, export_segment_rag_manifest, generate_segment_rag_summary, _get_segment_rag_status, _backfill_segment_cache_metadata
logger = logging.getLogger(__name__)
def _default_library_dir() -> Path:
return Path(__file__).resolve().parents[2] / "librerias" / "all_tracks"
def main() -> int:
parser = argparse.ArgumentParser(description="Build the persistent segment-audio retrieval cache.")
parser.add_argument("--library-dir", default=str(_default_library_dir()), help="Audio library directory")
parser.add_argument("--roles", nargs="*", default=None, help="Subset of roles to index")
parser.add_argument("--max-files", type=int, default=None, help="Optional limit for targeted files")
parser.add_argument("--duration-limit", type=float, default=24.0, help="Max seconds per file during indexing")
parser.add_argument("--force", action="store_true", help="Rebuild even if persistent segment cache already exists")
parser.add_argument("--json", action="store_true", help="Emit full JSON report")
parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose output")
parser.add_argument("--offset", type=int, default=0, help="Skip first N files before starting (for chunked indexing)")
parser.add_argument("--batch-size", type=int, default=None, help="Process exactly N files then stop (for chunked indexing)")
parser.add_argument("--output-manifest", type=str, default=None, help="Path to save full manifest JSON")
parser.add_argument("--output-summary", type=str, default=None, help="Path to save summary report")
parser.add_argument("--resume", action="store_true", help="Resume from previous run state")
parser.add_argument("--export-manifest", type=str, default=None,
help="Export candidate manifest to FILE (format: .json or .md)")
parser.add_argument("--export-format", type=str, default="json",
choices=['json', 'markdown'], help="Manifest export format")
parser.add_argument("--status", action="store_true", help="Show current index status without building")
parser.add_argument("--backfill-metadata", action="store_true", help="Backfill metadata into existing cache files from indexing state")
parser.add_argument("--force-backfill", action="store_true", help="Force backfill even for files that already have metadata")
args = parser.parse_args()
# Configure logging based on verbose flag
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')
else:
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
# Handle --status flag for early exit
if args.status:
status = _get_segment_rag_status(Path(args.library_dir))
if args.json:
print(json.dumps(status, indent=2, default=str))
else:
print("=" * 60)
print("SEGMENT RAG INDEX STATUS")
print("=" * 60)
print(f"Cache Directory: {status['cache_dir']}")
print(f"Cache Files: {status['cache_files']}")
print(f"Total Indexed Segments: {status['total_segments']}")
print(f"Status: {status.get('status', 'unknown')}")
if status.get('role_coverage'):
print("\nRole Coverage:")
for role, count in sorted(status['role_coverage'].items()):
print(f" {role}: {count} segments")
if status.get('newest_entries'):
print(f"\nNewest Entries: {len(status['newest_entries'])} files")
for entry in status['newest_entries'][:5]:
print(f" - {entry['file_name']} ({entry['segments']} segments)")
if status.get('oldest_entries'):
print(f"\nOldest Entries: {len(status['oldest_entries'])} files")
for entry in status['oldest_entries'][:5]:
print(f" - {entry['file_name']} ({entry['segments']} segments)")
return 0
# Handle --backfill-metadata flag for early exit
if args.backfill_metadata:
result = _backfill_segment_cache_metadata(Path(args.library_dir), force=args.force_backfill)
if args.json:
print(json.dumps(result, indent=2, default=str))
else:
print("=" * 60)
print("SEGMENT CACHE METADATA BACKFILL")
print("=" * 60)
print(f"Cache Directory: {result['cache_dir']}")
print(f"Cache Files: {result['cache_files']}")
print(f"Backfilled: {result['backfilled']}")
print(f"Skipped: {result['skipped']}")
print(f"Errors: {result['errors']}")
print(f"Status: {result.get('status', 'unknown')}")
return 0
listener = ReferenceAudioListener(args.library_dir)
report = listener.build_segment_rag_index(
roles=args.roles,
max_files=args.max_files,
duration_limit=args.duration_limit,
force=args.force,
offset=args.offset,
batch_size=args.batch_size,
resume=args.resume,
)
# Generate enhanced summary
summary = generate_segment_rag_summary(report, Path(args.library_dir))
if args.json:
print(json.dumps(summary, indent=2, default=str))
else:
# Enhanced text output
print("=" * 60)
print("SEGMENT RAG INDEX COMPLETE")
print("=" * 60)
print(f"Device: {summary['device']}")
print(f"Cache: {summary['segment_index_dir']}")
print()
print(f"Files: {summary['files_targeted']} targeted")
print(f" Built: {summary['built']}")
print(f" Reused: {summary['reused']}")
print(f" Skipped: {summary['skipped']}")
print(f" Errors: {summary['errors']}")
print()
print(f"Total Segments: {summary['total_segments']}")
if 'summary_stats' in summary:
stats = summary['summary_stats']
print(f" Avg per file: {stats['avg_segments_per_file']:.1f}")
print(f" Range: {stats['min_segments']} - {stats['max_segments']}")
if 'role_coverage' in summary:
print("\nRole Coverage:")
for role in sorted(summary['role_coverage'].keys()):
print(f" {role}: {summary['role_coverage'][role]} segments")
if 'cache_info' in summary:
info = summary['cache_info']
print(f"\nCache Size: {info['cache_size_mb']} MB")
if args.offset > 0:
print(f"\nOffset: {args.offset}")
if args.batch_size is not None:
print(f"Batch Size: {args.batch_size}")
print(f"Files Remaining: {summary.get('files_remaining', 'unknown')}")
# Save manifest if requested
if args.output_manifest:
manifest_path = Path(args.output_manifest)
manifest_path.parent.mkdir(parents=True, exist_ok=True)
with open(manifest_path, 'w') as f:
json.dump({
"report": report,
"full_manifest": report.get("manifest", []),
}, f, indent=2)
if not args.json:
print(f"\nManifest saved to: {manifest_path}")
# Save summary if requested
if args.output_summary:
summary_path = Path(args.output_summary)
summary_path.parent.mkdir(parents=True, exist_ok=True)
with open(summary_path, 'w') as f:
json.dump(summary, f, indent=2, default=str)
if not args.json:
print(f"Summary saved to: {summary_path}")
# Export manifest in requested format
if args.export_manifest:
manifest_path = Path(args.export_manifest)
export_format = args.export_format
        # Infer markdown from a .md extension; note that --export-format defaults
        # to "json", so an explicit json choice is also overridden here
        if export_format == "json" and manifest_path.suffix == '.md':
            export_format = 'markdown'
export_segment_rag_manifest(
report.get('manifest', []),
manifest_path,
format=export_format
)
print(f"Manifest exported to: {manifest_path}")
return 0
if __name__ == "__main__":
raise SystemExit(main())
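# Chunked indexing sketch (hypothetical library path): build the cache in
# batches, resuming between runs.
#
#     python segment_rag_builder.py --library-dir librerias/all_tracks --batch-size 200
#     python segment_rag_builder.py --library-dir librerias/all_tracks --offset 200 --batch-size 200 --resume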

File diff suppressed because it is too large

View File

@@ -1,798 +0,0 @@
import argparse
import json
import socket
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
try:
from song_generator import SongGenerator
except ImportError:
SongGenerator = None
STRUCTURE_SCENE_COUNTS = {
"minimal": 4,
"standard": 6,
"extended": 7,
}
# Expected buses for Phase 7 validation
EXPECTED_BUSES = ["drums", "bass", "music", "vocal", "fx"]
EXPECTED_CRITICAL_ROLES = {"kick", "bass", "clap", "hat"}
EXPECTED_AUDIO_FX_LAYERS = ["AUDIO ATMOS", "AUDIO CRASH FX", "AUDIO TRANSITION FILL"]
EXPECTED_BUS_NAMES = ["DRUMS", "BASS", "MUSIC"]
MIN_TRACKS_FOR_EXPORT = 6
MIN_BUSES_FOR_EXPORT = 3
MIN_RETURNS_FOR_EXPORT = 2
MASTER_VOLUME_RANGE = (0.75, 0.95)
# Expected AUDIO RESAMPLE track names
AUDIO_RESAMPLE_TRACKS = [
"AUDIO RESAMPLE REVERSE FX",
"AUDIO RESAMPLE RISER",
"AUDIO RESAMPLE DOWNLIFTER",
"AUDIO RESAMPLE STUTTER",
]
# Bus routing map: track role -> expected bus output
BUS_ROUTING_MAP = {
"kick": {"drums"},
"snare": {"drums"},
"clap": {"drums"},
"hat": {"drums"},
"perc": {"drums"},
"sub_bass": {"bass"},
"bass": {"bass"},
"chords": {"music"},
"pad": {"music"},
"pluck": {"music"},
"lead": {"music"},
"vocal": {"vocal"},
"vocal_chop": {"vocal"},
"reverse_fx": {"fx"},
"riser": {"fx"},
"impact": {"fx"},
"atmos": {"fx"},
"crash": {"drums", "fx"},
}
def _extract_bus_payload(payload: Any) -> List[Dict[str, Any]]:
if isinstance(payload, list):
return [item for item in payload if isinstance(item, dict)]
if isinstance(payload, dict):
buses = payload.get("buses", [])
if isinstance(buses, list):
return [item for item in buses if isinstance(item, dict)]
return []
def _normalize_bus_key(name: str) -> str:
normalized = "".join(ch for ch in (name or "").lower() if ch.isalnum())
if not normalized:
return ""
if "drum" in normalized or "groove" in normalized:
return "drums"
if "bass" in normalized or "tube" in normalized or "subdeep" in normalized:
return "bass"
if "music" in normalized or "wide" in normalized:
return "music"
if "vocal" in normalized or "vox" in normalized or "tail" in normalized:
return "vocal"
if "fx" in normalized or "wash" in normalized:
return "fx"
return ""
def _canonical_track_name(name: str) -> str:
text = (name or "").strip().lower()
if not text:
return ""
if " (" in text:
text = text.split(" (", 1)[0].strip()
return text
class AbletonSocketClient:
def __init__(self, host: str = "127.0.0.1", port: int = 9877, timeout: float = 15.0):
self.host = host
self.port = port
self.timeout = timeout
    def send(self, command_type: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
payload = json.dumps({
"type": command_type,
"params": params or {},
}).encode("utf-8") + b"\n"
with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
sock.sendall(payload)
reader = sock.makefile("r", encoding="utf-8")
try:
line = reader.readline()
finally:
reader.close()
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
if not line:
raise RuntimeError(f"No response for command: {command_type}")
return json.loads(line)
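    # Usage sketch: one newline-delimited JSON round-trip against a local bridge.
    #
    #     client = AbletonSocketClient()
    #     info = client.send("get_session_info")
    #     print(info.get("status"), info.get("result", {}).get("tempo"))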
def expect_success(name: str, response: Dict[str, Any]) -> Dict[str, Any]:
if response.get("status") != "success":
raise RuntimeError(f"{name} failed: {response}")
return response.get("result", {})
class TestResult:
"""Tracks test results for reporting."""
def __init__(self):
self.passed: List[Tuple[str, str]] = []
self.failed: List[Tuple[str, str]] = []
self.skipped: List[Tuple[str, str]] = []
self.warnings: List[Tuple[str, str]] = []
def add_pass(self, name: str, details: str = ""):
self.passed.append((name, details))
def add_fail(self, name: str, error: str):
self.failed.append((name, error))
def add_skip(self, name: str, reason: str):
self.skipped.append((name, reason))
def add_warning(self, name: str, message: str):
self.warnings.append((name, message))
def to_dict(self) -> Dict[str, Any]:
return {
"summary": {
"total": len(self.passed) + len(self.failed) + len(self.skipped) + len(self.warnings),
"passed": len(self.passed),
"failed": len(self.failed),
"skipped": len(self.skipped),
"warnings": len(self.warnings),
"status": "PASS" if len(self.failed) == 0 else "FAIL",
},
"passed_tests": [{"name": n, "details": d} for n, d in self.passed],
"failed_tests": [{"name": n, "error": d} for n, d in self.failed],
"skipped_tests": [{"name": n, "reason": d} for n, d in self.skipped],
"warnings": [{"name": n, "message": d} for n, d in self.warnings],
}
def print_report(self):
print("\n" + "=" * 60)
print("PHASE 7 SMOKE TEST REPORT")
print("=" * 60)
print(f"Timestamp: {datetime.now().isoformat()}")
print(f"Total: {len(self.passed) + len(self.failed) + len(self.skipped) + len(self.warnings)}")
print(f"Passed: {len(self.passed)}")
print(f"Failed: {len(self.failed)}")
print(f"Skipped: {len(self.skipped)}")
print(f"Warnings: {len(self.warnings)}")
print("-" * 60)
if self.passed:
print("\n[PASSED]")
for name, details in self.passed:
print(f" [OK] {name}: {details}")
if self.failed:
print("\n[FAILED]")
for name, error in self.failed:
print(f" [FAIL] {name}: {error}")
if self.warnings:
print("\n[WARNINGS]")
for name, message in self.warnings:
print(f" [WARN] {name}: {message}")
if self.skipped:
print("\n[SKIPPED]")
for name, reason in self.skipped:
print(f" [SKIP] {name}: {reason}")
print("\n" + "=" * 60)
status = "PASS" if len(self.failed) == 0 else "FAIL"
print(f"FINAL STATUS: {status}")
print("=" * 60 + "\n")
def run_readonly_checks(client: AbletonSocketClient) -> List[Tuple[str, str]]:
checks = []
expect_success("get_session_info", client.send("get_session_info"))
checks.append((
"get_session_info",
# f"tempo={session.get('tempo')} tracks={session.get('num_tracks')} scenes={session.get('num_scenes')}",
))
tracks = expect_success("get_tracks", client.send("get_tracks"))
checks.append(("get_tracks", f"tracks={len(tracks)}"))
return checks
def run_generation_check(
client: AbletonSocketClient,
genre: str,
style: str,
bpm: float,
key: str,
structure: str,
use_blueprint: bool = False,
) -> List[Tuple[str, str]]:
checks = []
params = {
"genre": genre,
"style": style,
"bpm": bpm,
"key": key,
"structure": structure,
}
if use_blueprint and SongGenerator is not None:
params = SongGenerator().generate_config(genre, style, bpm, key, structure)
result = expect_success(
"generate_complete_song",
client.send("generate_complete_song", params),
)
checks.append((
"generate_complete_song",
f"tracks={result.get('tracks')} scenes={result.get('scenes')} structure={result.get('structure')}",
))
session = expect_success("post_generate_session_info", client.send("get_session_info"))
actual_scenes = session.get("num_scenes")
expected_scenes = len(params.get("sections", [])) if use_blueprint and isinstance(params, dict) and params.get("sections") else STRUCTURE_SCENE_COUNTS.get(structure.lower())
if expected_scenes is not None and actual_scenes != expected_scenes:
raise RuntimeError(
f"scene count mismatch after generate_complete_song: expected {expected_scenes}, got {actual_scenes}"
)
checks.append((
"post_generate_session_info",
f"tracks={session.get('num_tracks')} scenes={actual_scenes}",
))
return checks
def run_bus_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify buses are created correctly."""
try:
buses_payload = expect_success("list_buses", client.send("list_buses"))
buses = _extract_bus_payload(buses_payload)
bus_keys = {_normalize_bus_key(bus.get("name", "")) for bus in buses}
bus_keys.discard("")
found_buses = []
missing_buses = []
for expected in EXPECTED_BUSES:
if expected in bus_keys:
found_buses.append(expected)
else:
missing_buses.append(expected)
if found_buses:
results.add_pass("buses_found", f"found={found_buses}")
if missing_buses:
# Not a failure if buses don't exist yet - they may be created during generation
results.add_skip("buses_missing", f"not_found={missing_buses} (may be created during generation)")
else:
results.add_pass("buses_complete", "all expected buses present")
except Exception as e:
results.add_fail("buses_check", str(e))
def run_routing_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify track routing is configured correctly."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
if not tracks:
results.add_skip("routing_check", "no tracks to verify routing")
return
correct_routing = 0
incorrect_routing = []
no_routing = 0
for track in tracks:
original_track_name = track.get("name", "")
track_name = _canonical_track_name(original_track_name)
output_routing = track.get("current_output_routing", "")
output_bus_key = _normalize_bus_key(output_routing)
track_bus_key = _normalize_bus_key(track_name)
if output_routing and output_routing.lower() != "master":
correct_routing += 1
elif not output_routing:
no_routing += 1
if track_bus_key:
continue
for role, expected_bus in BUS_ROUTING_MAP.items():
if role in track_name:
if output_bus_key in expected_bus:
correct_routing += 1
elif output_routing.lower() != "master":
expected_label = "/".join(sorted(expected_bus))
incorrect_routing.append(f"{original_track_name.lower()} -> {output_routing} (expected {expected_label})")
results.add_pass("routing_summary", f"correct={correct_routing} no_routing={no_routing}")
if incorrect_routing:
results.add_fail("routing_mismatches", ", ".join(incorrect_routing[:5]))
elif correct_routing > 0:
results.add_pass("routing_correct", f"{correct_routing} tracks with non-master routing")
except Exception as e:
results.add_fail("routing_check", str(e))
def run_audio_resample_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify AUDIO RESAMPLE tracks exist."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
track_names = [t.get("name", "") for t in tracks]
found_layers = []
missing_layers = []
for expected in AUDIO_RESAMPLE_TRACKS:
if any(expected.upper() in name.upper() for name in track_names):
found_layers.append(expected)
else:
missing_layers.append(expected)
if found_layers:
results.add_pass("audio_resample_found", f"layers={found_layers}")
if missing_layers:
results.add_skip("audio_resample_missing", f"not_found={missing_layers} (may require reference audio)")
else:
results.add_pass("audio_resample_complete", "all 4 resample layers present")
# Verify they are audio tracks
for track in tracks:
name = track.get("name", "").upper()
if "AUDIO RESAMPLE" in name:
if track.get("has_audio_input"):
results.add_pass(f"audio_track_type_{name[:20]}", "correct audio track type")
else:
results.add_fail(f"audio_track_type_{name[:20]}", "expected audio track")
except Exception as e:
results.add_fail("audio_resample_check", str(e))
def run_automation_snapshot_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify automation and device parameter snapshots."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
total_devices = 0
tracks_with_devices = 0
tracks_with_automation = 0
for track in tracks:
num_devices = track.get("num_devices", 0)
if num_devices > 0:
total_devices += num_devices
tracks_with_devices += 1
# Check for arrangement clips (may contain automation)
arrangement_clips = track.get("arrangement_clip_count", 0)
if arrangement_clips > 0:
tracks_with_automation += 1
if tracks_with_devices > 0:
results.add_pass("automation_devices", f"tracks_with_devices={tracks_with_devices} total_devices={total_devices}")
else:
results.add_skip("automation_devices", "no devices found")
if tracks_with_automation > 0:
results.add_pass("automation_clips", f"tracks_with_arrangement_clips={tracks_with_automation}")
else:
results.add_skip("automation_clips", "no arrangement clips (may need to commit to arrangement)")
# Try to get device parameters for first track with devices
for i, track in enumerate(tracks):
if track.get("num_devices", 0) > 0:
try:
devices = expect_success("get_devices", client.send("get_devices", {"track_index": i}))
if devices:
params_sample = []
for dev in devices[:3]:
params = dev.get("parameters", [])
if params:
params_sample.append(f"{dev.get('name', '?')}:{len(params)}params")
if params_sample:
results.add_pass("automation_params_snapshot", ", ".join(params_sample[:3]))
break
except Exception:
pass
break
except Exception as e:
results.add_fail("automation_snapshot_check", str(e))
def run_loudness_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify basic loudness levels using output meters."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
tracks_with_signal = 0
max_level = 0.0
level_samples = []
for track in tracks:
output_level = track.get("output_meter_level", 0.0)
left = track.get("output_meter_left", 0.0)
right = track.get("output_meter_right", 0.0)
if output_level and output_level > 0:
tracks_with_signal += 1
max_level = max(max_level, output_level)
level_samples.append(f"{track.get('name', '?')[:15]}:{output_level:.2f}")
# Check for stereo balance
if left and right and left > 0 and right > 0:
balance = abs(left - right)
if balance < 0.1:
pass # Balanced stereo
if tracks_with_signal > 0:
results.add_pass("loudness_signal_detected", f"tracks_with_signal={tracks_with_signal} max_level={max_level:.3f}")
else:
results.add_skip("loudness_signal", "no signal detected (playback may be stopped)")
# Check for clipping (levels > 1.0)
if max_level > 1.0:
results.add_fail("loudness_clipping", f"max_level={max_level:.3f} indicates potential clipping")
else:
results.add_pass("loudness_no_clipping", f"max_level={max_level:.3f}")
# Sample levels for verification
if level_samples:
results.add_pass("loudness_levels", ", ".join(level_samples[:5]))
except Exception as e:
results.add_fail("loudness_check", str(e))
def run_critical_layer_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify critical layers (kick, bass, clap, hat) exist and have content."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
track_names = [str(t.get("name", "")).upper() for t in tracks if isinstance(t, dict)]
found_layers = {role: False for role in EXPECTED_CRITICAL_ROLES}
for track_name in track_names:
for role in EXPECTED_CRITICAL_ROLES:
if role.upper() in track_name or f"AUDIO {role.upper()}" in track_name:
found_layers[role] = True
break
for role, found in found_layers.items():
if found:
results.add_pass(f"critical_layer_{role}", "found in tracks")
else:
results.add_fail(f"critical_layer_{role}", "missing - set may sound incomplete")
except Exception as e:
results.add_fail("critical_layer_check", str(e))
def run_derived_fx_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify derived FX tracks (AUDIO RESAMPLE) are present."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
track_names = [str(t.get("name", "")).upper() for t in tracks if isinstance(t, dict)]
found_derived = []
missing_derived = []
for expected in AUDIO_RESAMPLE_TRACKS:
if any(expected.upper() in name for name in track_names):
found_derived.append(expected)
else:
missing_derived.append(expected)
if found_derived:
results.add_pass("derived_fx_found", f"layers={found_derived}")
if missing_derived:
results.add_skip("derived_fx_missing", f"not_found={missing_derived} (may require reference audio)")
else:
results.add_pass("derived_fx_complete", "all 4 resample layers present")
except Exception as e:
results.add_fail("derived_fx_check", str(e))
def run_export_readiness_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify set is ready for export."""
try:
expect_success("get_session_info", client.send("get_session_info"))
tracks = expect_success("get_tracks", client.send("get_tracks"))
issues = []
track_count = len(tracks) if isinstance(tracks, list) else 0
if track_count < MIN_TRACKS_FOR_EXPORT:
issues.append(f"insufficient_tracks: {track_count} (need {MIN_TRACKS_FOR_EXPORT}+)")
master_response = client.send("get_track_info", {"track_type": "master", "track_index": 0})
if master_response.get("status") == "success":
master_volume = float(master_response.get("result", {}).get("volume", 0.85))
if master_volume < MASTER_VOLUME_RANGE[0]:
issues.append(f"master_volume_low: {master_volume:.2f}")
elif master_volume > MASTER_VOLUME_RANGE[1]:
issues.append(f"master_volume_high: {master_volume:.2f}")
muted_count = sum(1 for t in tracks if isinstance(t, dict) and t.get("mute", False))
if muted_count > track_count * 0.5:
issues.append(f"too_many_muted: {muted_count}/{track_count}")
if issues:
results.add_warning("export_readiness_issues", f"issues={len(issues)}")
for issue in issues:
results.add_fail(f"export_ready_{issue.split(':')[0]}", issue)
else:
results.add_pass("export_ready", "set appears ready for export")
except Exception as e:
results.add_fail("export_readiness_check", str(e))
def run_midi_clip_content_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify MIDI tracks have clips with notes."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
midi_tracks_empty = []
midi_tracks_with_notes = 0
for track in tracks:
if not isinstance(track, dict):
continue
track_type = str(track.get("type", "")).lower()
if track_type != "midi":
continue
track_name = track.get("name", "?")
clips = track.get("clips", [])
if not isinstance(clips, list):
clips = []
has_notes = False
empty_clips = []
for clip in clips:
if not isinstance(clip, dict):
continue
notes_count = clip.get("notes_count", 0)
has_notes_flag = clip.get("has_notes", None)
if has_notes_flag is True or notes_count > 0:
has_notes = True
elif has_notes_flag is False or (has_notes_flag is None and notes_count == 0):
empty_clips.append(clip.get("name", "?"))
if has_notes:
midi_tracks_with_notes += 1
elif empty_clips:
midi_tracks_empty.append({
"track_name": track_name,
"empty_clips_count": len(empty_clips),
})
if midi_tracks_with_notes > 0:
results.add_pass("midi_tracks_with_notes", f"count={midi_tracks_with_notes}")
if midi_tracks_empty:
for track_info in midi_tracks_empty[:3]:
results.add_fail(
f"midi_track_empty_{track_info['track_name'][:20]}",
f"Track has {track_info['empty_clips_count']} empty MIDI clips - may need notes"
)
except Exception as e:
results.add_fail("midi_clip_content_check", str(e))
def run_bus_signal_checks(client: AbletonSocketClient, results: TestResult) -> None:
"""Verify buses receive signal from tracks."""
try:
buses_payload = expect_success("list_buses", client.send("list_buses"))
buses = _extract_bus_payload(buses_payload)
tracks = expect_success("get_tracks", client.send("get_tracks"))
bus_signal_map = {}
for bus in buses:
if not isinstance(bus, dict):
continue
bus_name = bus.get("name", "").upper()
bus_signal_map[bus_name] = {"senders": [], "has_signal": False}
for track in tracks:
if not isinstance(track, dict):
continue
track_name = str(track.get("name", "")).upper()
output_routing = str(track.get("current_output_routing", "")).upper()
for bus_name in bus_signal_map:
if bus_name in output_routing:
bus_signal_map[bus_name]["senders"].append(track_name)
sends = track.get("sends", [])
if isinstance(sends, list):
for send_level in sends:
try:
if float(send_level) > 0.01:
pass
except (TypeError, ValueError):
pass
buses_without_senders = []
buses_with_senders = []
for bus_name, info in bus_signal_map.items():
if info["senders"]:
buses_with_senders.append(bus_name)
else:
buses_without_senders.append(bus_name)
if buses_with_senders:
results.add_pass("buses_with_signal", f"buses={buses_with_senders}")
if buses_without_senders:
for bus_name in buses_without_senders[:3]:
results.add_fail(f"bus_no_signal_{bus_name[:15]}",
f"Bus '{bus_name}' has no routed tracks - will not produce output")
except Exception as e:
results.add_fail("bus_signal_check", str(e))
def run_clipping_detection(client: AbletonSocketClient, results: TestResult) -> None:
"""Detect tracks with dangerously high volume (clipping risk)."""
try:
tracks = expect_success("get_tracks", client.send("get_tracks"))
clipping_tracks = []
high_volume_tracks = []
for track in tracks:
if not isinstance(track, dict):
continue
track_name = track.get("name", "?")
volume = float(track.get("volume", 0.85))
if volume > 0.95:
clipping_tracks.append({"name": track_name, "volume": volume})
elif volume > 0.90:
high_volume_tracks.append({"name": track_name, "volume": volume})
if clipping_tracks:
for track_info in clipping_tracks[:3]:
results.add_fail(f"clipping_track_{track_info['name'][:15]}", f"Volume {track_info['volume']:.2f} > 0.95 - CLIPPING RISK")
if high_volume_tracks:
for track_info in high_volume_tracks[:3]:
results.add_warning(f"high_volume_{track_info['name'][:15]}",
f"Volume {track_info['volume']:.2f} - consider reducing")
if not clipping_tracks and not high_volume_tracks:
results.add_pass("no_clipping_tracks", "All track volumes in safe range")
except Exception as e:
results.add_fail("clipping_detection", str(e))
def run_all_phase7_tests(client: AbletonSocketClient, results: TestResult) -> None:
"""Run all Phase 7 smoke tests."""
print("\n[Phase 7] Running bus verification...")
run_bus_checks(client, results)
print("[Phase 7] Running routing verification...")
run_routing_checks(client, results)
print("[Phase 7] Running AUDIO RESAMPLE track verification...")
run_audio_resample_checks(client, results)
print("[Phase 7] Running automation snapshot verification...")
run_automation_snapshot_checks(client, results)
print("[Phase 7] Running loudness verification...")
run_loudness_checks(client, results)
print("[Phase 7] Running critical layer verification...")
run_critical_layer_checks(client, results)
print("[Phase 7] Running derived FX verification...")
run_derived_fx_checks(client, results)
print("[Phase 7] Running export readiness verification...")
run_export_readiness_checks(client, results)
print("[Phase 7] Running MIDI clip content verification...")
run_midi_clip_content_checks(client, results)
print("[Phase 7] Running bus signal verification...")
run_bus_signal_checks(client, results)
print("[Phase 7] Running clipping detection...")
run_clipping_detection(client, results)
def main() -> int:
parser = argparse.ArgumentParser(description="Smoke test for AbletonMCP_AI socket runtime")
parser.add_argument("--host", default="127.0.0.1")
parser.add_argument("--port", type=int, default=9877)
parser.add_argument("--timeout", type=float, default=15.0)
parser.add_argument("--generate-demo", action="store_true")
parser.add_argument("--genre", default="techno")
parser.add_argument("--style", default="industrial")
parser.add_argument("--bpm", type=float, default=128.0)
parser.add_argument("--key", default="Am")
parser.add_argument("--structure", default="standard")
parser.add_argument("--use-blueprint", action="store_true")
parser.add_argument("--phase7", action="store_true", help="Run Phase 7 extended tests (buses, routing, audio resample, automation, loudness)")
parser.add_argument("--json-report", action="store_true", help="Output report as JSON")
args = parser.parse_args()
client = AbletonSocketClient(host=args.host, port=args.port, timeout=args.timeout)
# Run basic checks
print("[Basic] Running readonly checks...")
checks = run_readonly_checks(client)
for name, details in checks:
print(f"[ok] {name}: {details}")
# Run generation check if requested
if args.generate_demo:
print("\n[Generation] Running generation check...")
checks.extend(
run_generation_check(
client,
genre=args.genre,
style=args.style,
bpm=args.bpm,
key=args.key,
structure=args.structure,
use_blueprint=args.use_blueprint,
)
)
for name, details in checks[-2:]:
print(f"[ok] {name}: {details}")
# Run Phase 7 tests if requested
results = TestResult()
if args.phase7:
run_all_phase7_tests(client, results)
if args.json_report:
print(json.dumps(results.to_dict(), indent=2))
else:
results.print_report()
return 0 if len(results.failed) == 0 else 1
return 0
if __name__ == "__main__":
raise SystemExit(main())
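# Example invocations (assumed to be run from MCP_Server/; flags as defined above):
#   python socket_smoke_test.py
#   python socket_smoke_test.py --generate-demo --genre tech-house --bpm 126 --key Am
#   python socket_smoke_test.py --phase7 --json-report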

File diff suppressed because it is too large

View File

@@ -1,177 +0,0 @@
from __future__ import annotations
import argparse
import gzip
import json
from collections import Counter
from pathlib import Path
import xml.etree.ElementTree as ET
def _node_name(node: ET.Element | None) -> str:
if node is None:
return ""
for tag in ("EffectiveName", "UserName", "Name"):
child = node.find(tag)
if child is not None:
value = child.attrib.get("Value", "")
if value:
return value
return node.attrib.get("Value", "")
def _device_name(device: ET.Element) -> str:
if device.tag == "PluginDevice":
info = device.find("PluginDesc/VstPluginInfo")
if info is None:
info = device.find("PluginDesc/AuPluginInfo")
if info is not None:
plug = info.find("PlugName")
if plug is not None and plug.attrib.get("Value"):
return plug.attrib["Value"]
return device.tag
def _session_clip_count(track: ET.Element) -> int:
count = 0
for slot in track.findall("./DeviceChain/MainSequencer/ClipSlotList/ClipSlot"):
if slot.find("Value/MidiClip") is not None or slot.find("Value/AudioClip") is not None:
count += 1
return count
def _arrangement_clip_count(track: ET.Element) -> int:
return len(track.findall(".//MainSequencer//MidiClip")) + len(
track.findall(".//MainSequencer//AudioClip")
)
def _tempo_value(live_set: ET.Element) -> float | None:
node = live_set.find(".//Tempo/Manual")
if node is None:
return None
try:
return float(node.attrib.get("Value", "0"))
except ValueError:
return None
def _locator_summary(live_set: ET.Element) -> list[dict[str, float | str | None]]:
locators: list[tuple[float, str]] = []
for locator in live_set.findall(".//Locators/Locators/Locator"):
try:
time = float(locator.find("Time").attrib.get("Value", "0"))
except (AttributeError, ValueError):
time = 0.0
name = _node_name(locator.find("Name"))
locators.append((time, name))
locators.sort(key=lambda item: item[0])
summary: list[dict[str, float | str | None]] = []
for index, (time, name) in enumerate(locators):
next_time = locators[index + 1][0] if index + 1 < len(locators) else None
summary.append(
{
"time_beats": time,
"name": name,
"section_length_beats": None if next_time is None else next_time - time,
}
)
return summary
def _arrangement_length_beats(root: ET.Element) -> float:
max_end = 0.0
for clip in root.findall(".//MidiClip") + root.findall(".//AudioClip"):
current_end = clip.find("CurrentEnd")
start = clip.attrib.get("Time")
if current_end is None or start is None:
continue
try:
end = float(start) + float(current_end.attrib.get("Value", "0"))
except ValueError:
continue
max_end = max(max_end, end)
return max_end
def analyze_set(als_path: Path) -> dict:
with gzip.open(als_path, "rb") as handle:
root = ET.parse(handle).getroot()
live_set = root.find("LiveSet")
if live_set is None:
raise ValueError(f"Invalid ALS file: {als_path}")
tracks = list(live_set.find("Tracks") or [])
track_summaries = []
device_counter: Counter[str] = Counter()
for track in tracks:
devices = track.findall("./DeviceChain/DeviceChain/Devices/*")
device_names = [_device_name(device) for device in devices]
device_counter.update(device_names)
track_summaries.append(
{
"type": track.tag,
"name": _node_name(track.find("Name")),
"group_id": track.find("TrackGroupId").attrib.get("Value", "")
if track.find("TrackGroupId") is not None
else "",
"session_clip_count": _session_clip_count(track),
"arrangement_clip_count": _arrangement_clip_count(track),
"devices": device_names,
}
)
automation_events = 0
for automation in root.findall(".//ArrangerAutomation"):
automation_events += len(automation.findall(".//FloatEvent"))
automation_events += len(automation.findall(".//EnumEvent"))
automation_events += len(automation.findall(".//BoolEvent"))
return {
"file": str(als_path),
"tempo": _tempo_value(live_set),
"track_type_counts": dict(Counter(track.tag for track in tracks)),
"scene_count": len(live_set.findall("./SceneNames/Scene")),
"locators": _locator_summary(live_set),
"arrangement_length_beats": _arrangement_length_beats(root),
"automation_event_count": automation_events,
"top_devices": dict(device_counter.most_common(16)),
"tracks": track_summaries,
}
def main() -> None:
parser = argparse.ArgumentParser(description="Analyze Ableton .als templates.")
parser.add_argument("path", nargs="?", default=".", help="Folder containing .als files")
parser.add_argument("--json", action="store_true", help="Emit JSON")
args = parser.parse_args()
base = Path(args.path).resolve()
results = [analyze_set(path) for path in sorted(base.rglob("*.als"))]
if args.json:
print(json.dumps(results, indent=2))
return
for result in results:
print(f"=== {Path(result['file']).name} ===")
print(f"tempo: {result['tempo']}")
print(f"tracks: {result['track_type_counts']}")
print(f"scenes: {result['scene_count']}")
print(f"arrangement_length_beats: {result['arrangement_length_beats']}")
print(f"automation_event_count: {result['automation_event_count']}")
print("locators:")
for locator in result["locators"]:
print(
f" - {locator['time_beats']:>6} {locator['name']}"
f" len={locator['section_length_beats']}"
)
print("top_devices:")
for name, count in result["top_devices"].items():
print(f" - {name}: {count}")
print()
if __name__ == "__main__":
main()
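# Example invocations (assumed folder layout):
#   python template_analyzer.py path/to/templates
#   python template_analyzer.py path/to/templates --json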

View File

@@ -1,452 +0,0 @@
import os
import json
import logging
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Optional
from multiprocessing import Pool, cpu_count
try:
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
HAS_ML = True
except ImportError:
HAS_ML = False
# Import AudioAnalyzer for spectral analysis
try:
from audio_analyzer import AudioAnalyzer, analyze_sample
HAS_AUDIO_ANALYZER = True
except ImportError:
HAS_AUDIO_ANALYZER = False
logger = logging.getLogger("VectorManager")
logging.basicConfig(level=logging.INFO)
# Global analyzer for multiprocessing workers (initialized once per worker)
_worker_analyzer = None
def _init_worker():
"""Initialize the audio analyzer for each worker process."""
global _worker_analyzer
if HAS_AUDIO_ANALYZER:
try:
_worker_analyzer = AudioAnalyzer(backend="auto")
except Exception:
_worker_analyzer = None
def _process_single_file(args):
"""
Process a single audio file and return its metadata.
Used for multiprocessing parallel execution.
"""
f, library_dir, skip_audio_analysis = args
f = Path(f)
import soundfile as sf
# Clean up the name for better semantic understanding
name = f.stem
name_lower = name.lower()
clean_name = name.replace('_', ' ').replace('-', ' ').lower()
# Keywords that strongly suggest a full song/mix
full_song_keywords = {'original mix', 'extended mix', 'full mix', 'edit', 'master', '320kbps', 'remix'}
# Extract duration
duration = 0.0
try:
info = sf.info(str(f))
duration = info.duration
except Exception:
duration = -1.0
# Detect if it's likely a full song based on name and duration
is_full_song = False
if duration > 45.0:
is_full_song = True
elif any(kw in name_lower for kw in full_song_keywords) and duration > 30.0:
is_full_song = True
# Spectral analysis with AudioAnalyzer
key = None
key_confidence = 0.0
spectral_centroid = None
is_harmonic = None
global _worker_analyzer
if not skip_audio_analysis and _worker_analyzer is not None:
try:
features = _worker_analyzer.analyze(str(f))
key = features.key
key_confidence = features.key_confidence
spectral_centroid = features.spectral_centroid
is_harmonic = features.is_harmonic
except Exception:
pass
# Use relative path as part of the context
try:
rel_path = f.relative_to(library_dir)
parts = rel_path.parts[:-1]
path_context = " ".join(parts).lower()
except ValueError:
path_context = ""
description = f"{clean_name} {path_context}"
metadata = {
'path': str(f),
'name': name,
'description': description,
'duration': duration,
'is_full_song': is_full_song,
'key': key,
'key_confidence': key_confidence,
'spectral_centroid': spectral_centroid,
'is_harmonic': is_harmonic
}
return metadata, description
class VectorManager:
def __init__(self, library_dir: str, skip_audio_analysis: bool = False):
self.library_dir = Path(library_dir)
self.index_file = self.library_dir / ".sample_embeddings.json"
self.skip_audio_analysis = skip_audio_analysis
self.model = None
self.embeddings = []
self.metadata = []
# Audio analyzer instance for spectral analysis
self._audio_analyzer: Optional[AudioAnalyzer] = None
if HAS_AUDIO_ANALYZER and not skip_audio_analysis:
try:
self._audio_analyzer = AudioAnalyzer(backend="auto")
logger.info("AudioAnalyzer initialized for spectral analysis")
except Exception as e:
logger.warning(f"Failed to initialize AudioAnalyzer: {e}")
self._audio_analyzer = None
if HAS_ML:
try:
# Load a very lightweight model for fast embeddings
logger.info("Loading sentence-transformers model (all-MiniLM-L6-v2)...")
self.model = SentenceTransformer('all-MiniLM-L6-v2')
except Exception as e:
logger.error(f"Failed to load embedding model: {e}")
self._load_or_build_index()
def _get_library_fingerprint(self) -> Dict:
"""Compute a fingerprint of the library directory for change detection (BF-02/MJ-07)."""
extensions = {'.wav', '.aif', '.aiff', '.mp3'}
file_count = 0
latest_mtime = 0.0
try:
for ext in extensions:
for f in self.library_dir.rglob('*' + ext):
file_count += 1
try:
mtime = f.stat().st_mtime
if mtime > latest_mtime:
latest_mtime = mtime
except OSError:
pass
for f in self.library_dir.rglob('*' + ext.upper()):
file_count += 1
try:
mtime = f.stat().st_mtime
if mtime > latest_mtime:
latest_mtime = mtime
except OSError:
pass
except Exception:
pass
return {'file_count': file_count, 'latest_mtime': latest_mtime}
def _load_or_build_index(self):
if self.index_file.exists():
logger.info("Loading existing vector index...")
try:
with open(self.index_file, 'r', encoding='utf-8') as f:
data = json.load(f)
self.metadata = data.get('metadata', [])
# BF-02/MJ-07: Check library fingerprint for auto-rebuild
stored_fp = data.get('library_fingerprint', {})
current_fp = self._get_library_fingerprint()
stored_count = stored_fp.get('file_count', 0)
current_count = current_fp.get('file_count', 0)
if current_count != stored_count and stored_count > 0:
logger.info(f"Library changed ({stored_count} -> {current_count} files). Rebuilding index...")
self._build_index()
return
if HAS_ML and 'embeddings' in data:
self.embeddings = np.array(data['embeddings'])
else:
logger.warning("No embeddings found in loaded index.")
except Exception as e:
logger.error(f"Failed to load index: {e}")
self._build_index()
else:
self._build_index()
def _build_index(self):
logger.info(f"Scanning library {self.library_dir} for new embeddings...")
extensions = {'.wav', '.aif', '.aiff', '.mp3'}
files_to_process = []
for ext in extensions:
files_to_process.extend(self.library_dir.rglob('*' + ext))
files_to_process.extend(self.library_dir.rglob('*' + ext.upper()))
if not files_to_process:
logger.warning(f"No audio files found in {self.library_dir} to embed.")
return
# Get unique files
unique_files = list(set(str(f) for f in files_to_process))
total_files = len(unique_files)
logger.info(f"Found {total_files} audio files to process")
# Determine number of workers (use 50% of available CPUs)
num_workers = max(1, cpu_count() // 2)
logger.info(f"Using {num_workers} CPU cores for parallel processing (50% capacity)")
# Prepare arguments for parallel processing
args_list = [(f, str(self.library_dir), self.skip_audio_analysis) for f in unique_files]
# Process files in parallel using multiprocessing
texts_to_embed = []
self.metadata = []
if not self.skip_audio_analysis and HAS_AUDIO_ANALYZER:
# Use multiprocessing with audio analysis
logger.info("Starting parallel audio analysis...")
with Pool(processes=num_workers, initializer=_init_worker) as pool:
results = pool.map(_process_single_file, args_list)
for metadata, description in results:
self.metadata.append(metadata)
texts_to_embed.append(description)
else:
# Fallback to sequential processing (no audio analysis)
logger.info("Processing files sequentially (audio analysis disabled)...")
import soundfile as sf
full_song_keywords = {'original mix', 'extended mix', 'full mix', 'edit', 'master', '320kbps', 'remix'}
for i, f in enumerate(unique_files):
f = Path(f)
if (i + 1) % max(1, total_files // 20) == 0 or (i + 1) == total_files:
logger.info(f"Processing files: {i+1}/{total_files} ({(i+1)/total_files*100:.1f}%)")
name = f.stem
clean_name = name.replace('_', ' ').replace('-', ' ').lower()
duration = 0.0
try:
info = sf.info(str(f))
duration = info.duration
except Exception:
duration = -1.0
is_full_song = duration > 45.0
try:
rel_path = f.relative_to(self.library_dir)
path_context = " ".join(rel_path.parts[:-1]).lower()
except ValueError:
path_context = ""
description = f"{clean_name} {path_context}"
texts_to_embed.append(description)
self.metadata.append({
'path': str(f),
'name': name,
'description': description,
'duration': duration,
'is_full_song': is_full_song,
'key': None,
'key_confidence': 0.0,
'spectral_centroid': None,
'is_harmonic': None
})
if HAS_ML and self.model:
logger.info(f"Generating vectors for {len(texts_to_embed)} samples. This might take a moment...")
embeddings = self.model.encode(texts_to_embed)
self.embeddings = embeddings
# BF-02: Save fingerprint alongside embeddings for auto-rebuild detection
fingerprint = self._get_library_fingerprint()
# Save the vectors
with open(self.index_file, 'w', encoding='utf-8') as f:
json.dump({
'metadata': self.metadata,
'embeddings': embeddings.tolist(),
'library_fingerprint': fingerprint
}, f)
logger.info(f"Saved {len(self.metadata)} embeddings to {self.index_file}.")
else:
logger.error("ML libraries not installed. Run 'pip install sentence-transformers scikit-learn numpy'")
# MJ-06: Genre keyword expansion for richer semantic search
GENRE_SEARCH_TERMS = {
'tech-house': ['groovy', 'driving', 'punchy', 'jackin', 'swinging', 'hypnotic', 'bouncy'],
'house': ['deep', 'soulful', 'warm', 'classic', 'funky'],
'techno': ['industrial', 'dark', 'raw', 'hypnotic', 'peak-time', 'acid'],
'trance': ['uplifting', 'ethereal', 'driving', 'euphoric'],
'deep-house': ['deep', 'chill', 'smooth', 'laidback', 'warm'],
'minimal': ['minimal', 'sparse', 'subtle', 'clean'],
'drum-and-bass': ['heavy', 'dark', 'neuro', 'rolling', 'aggressive'],
}
def enrich_query_with_genre(self, query: str, genre: str = "") -> str:
"""MJ-06: Enrich a search query with genre-specific terms."""
genre_lower = (genre or "").lower().strip()
terms = self.GENRE_SEARCH_TERMS.get(genre_lower, [])
if terms:
# Pick 2 random genre terms to enrich without overwhelming
import random as _rng
picked = _rng.sample(terms, min(2, len(terms)))
enriched = f"{query} {' '.join(picked)}"
logger.info(f"Enriched query for '{genre_lower}': '{query}' -> '{enriched}'")
return enriched
return query
def semantic_search(self, query: str, limit: int = 5, max_duration: float = 0.0, genre: str = "") -> List[Dict]:
"""
Returns a list of metadata dicts sorted by semantic relevance down to the limit.
Fallback to basic substring matching if ML is unavailable.
Args:
query: Semantic search terms
limit: Max results to return
max_duration: If > 0, filter out samples longer than this value
genre: Optional genre to enrich the search query (MJ-06)
"""
if not HAS_ML or self.model is None or len(self.embeddings) == 0:
logger.warning("ML unavailable, falling back to substring search.")
return self._fallback_search(query, limit, max_duration)
# MJ-06: Enrich query with genre terms
effective_query = self.enrich_query_with_genre(query, genre) if genre else query
logger.info(f"Performing semantic search for: '{effective_query}' (max_duration={max_duration})")
query_emb = self.model.encode([effective_query])
# Calculate cosine similarity between query and all stored embeddings
similarities = cosine_similarity(query_emb, self.embeddings)[0]
# Apply duration and full-song penalties/filtering
adjusted_similarities = similarities.copy()
for i, meta in enumerate(self.metadata):
# Filter out if it exceeds max_duration (if specified)
if max_duration > 0 and (meta.get('duration', 0) > max_duration or meta.get('duration', 0) < 0):
adjusted_similarities[i] = -1.0
continue
# Filter out explicit full songs
if meta.get('is_full_song', False) and max_duration > 0:
adjusted_similarities[i] = -1.0
continue
# Small penalty for longer samples if no max_duration specified
# to prioritize snippets over loops
if max_duration == 0 and meta.get('duration', 0) > 10.0:
adjusted_similarities[i] *= 0.9
# Get top indices from adjusted scores
top_indices = np.argsort(adjusted_similarities)[::-1][:limit]
results = []
for idx in top_indices:
score = float(adjusted_similarities[idx])
if score < 0: # All remaining candidates are invalid
break
meta = self.metadata[idx].copy()
meta['score'] = score
results.append(meta)
return results
def _fallback_search(self, query: str, limit: int = 5, max_duration: float = 0.0) -> List[Dict]:
query = query.lower()
scored = []
for m in self.metadata:
# Duration filter
if max_duration > 0 and (m.get('duration', 0) > max_duration or m.get('duration', 0) < 0):
continue
if m.get('is_full_song', False) and max_duration > 0:
continue
score = 0
if query in m['name'].lower():
score += 10
if query in m['description'].lower():
score += 5
if score > 0:
scored.append((score, m))
scored.sort(key=lambda x: x[0], reverse=True)
return [m for s, m in scored[:limit]]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Vector Manager for sample library indexing")
parser.add_argument("library_dir", nargs='?', help="Path to the sample library directory")
parser.add_argument("search_query", nargs='?', help="Optional search query to test")
parser.add_argument("--skip-audio-analysis", action="store_true",
help="Skip spectral audio analysis for faster rebuild (development mode)")
parser.add_argument("--rebuild", action="store_true",
help="Force rebuild of the index from scratch")
args = parser.parse_args()
if args.library_dir:
# Check if index exists and rebuild flag is set
index_file = Path(args.library_dir) / ".sample_embeddings.json"
if args.rebuild and index_file.exists():
logger.info(f"Removing existing index for rebuild: {index_file}")
index_file.unlink()
vm = VectorManager(args.library_dir, skip_audio_analysis=args.skip_audio_analysis)
if args.search_query:
res = vm.semantic_search(args.search_query)
print(f"Search Results for '{args.search_query}':")
for r in res:
print(f" Score: {r['score']:.3f}")
print(f" Name: {r['name']}")
print(f" Path: {r['path']}")
print(f" Key: {r.get('key', 'N/A')} (confidence: {r.get('key_confidence', 0):.2f})")
print(f" Spectral Centroid: {r.get('spectral_centroid', 'N/A')}")
print(f" Is Harmonic: {r.get('is_harmonic', 'N/A')}")
print()
else:
# Print summary of the loaded index
print(f"\nIndex Summary:")
print(f" Total samples: {len(vm.metadata)}")
# Count samples with spectral data
with_key = sum(1 for m in vm.metadata if m.get('key') is not None)
with_centroid = sum(1 for m in vm.metadata if m.get('spectral_centroid') is not None)
print(f" Samples with key detected: {with_key}")
print(f" Samples with spectral centroid: {with_centroid}")
else:
print("Usage: python vector_manager.py <library_dir> [search_query] [--skip-audio-analysis] [--rebuild]")
print("\nOptions:")
print(" --skip-audio-analysis Skip spectral analysis for faster rebuild")
print(" --rebuild Force rebuild index from scratch")

View File

@@ -1,222 +0,0 @@
# AbletonMCP-AI
A hybrid system for controlling Ableton Live 12 from MCP and generating complex, Arrangement View-oriented musical projects.
It combines:
- a Remote Script running inside Live
- a Python MCP server (52+ tools)
- sample selection from a local library with ML semantic search
- reference-guided reconstruction
- audio fallback plus MIDI/instrument layers
- buses, returns, and per-section mix snapshots
- Camelot-wheel harmonic mixing for DJ sets
- DJ-ready Tech House generation with extended intros/outros
This is the project snapshot as of 2026-03-28.
## Current status
The system can already:
- generate complete projects in Arrangement View using samples from the local library
- create structure, tracks, scenes, cue points, and a guide track
- combine MIDI, stock instruments, and local-library audio (827 indexed samples)
- analyze a reference track and reconstruct an original result inspired by that material
- materialize `AUDIO ...` layers with real samples (kick, bass, synth, vocal, FX, etc.)
- apply per-section snapshots to tracks and returns during the Session -> Arrangement commit
- operate returns from the runtime and from MCP
- mix buses (DRUM BUS, BASS BUS, MUSIC WIDE, VOCAL BUS, FX WASH)
- derived layers (RESAMPLE REVERSE FX, RISER, DOWNLIFTER, STUTTER)
- generate DJ-ready structures with 32-bar intros/outros for beatmatching
- harmonic mixing with the Camelot wheel (compatible keys, transition suggestions)
- auto-discover reference tracks from `librerias/reference/`
- preview blueprints without creating anything in Ableton
- regenerate individual sections
- persist generation history and sample diversity across sessions
- genre-enriched semantic search (tech-house, house, techno, trance, etc.)
- auto-reindex the library when files change
- automatically validate the set after generation
## Architecture at a glance
1. `__init__.py`
Main Remote Script. Lives inside Ableton, opens the TCP socket, and executes commands against the Live API.
2. `MCP_Server/server.py`
MCP/FastMCP server. Exposes tools, normalizes aliases, and talks to the Remote Script.
3. `MCP_Server/song_generator.py`
Music generator. Builds the blueprint of tracks, sections, performance, locators, and returns.
4. `MCP_Server/reference_listener.py`
Listens to the reference audio and assembles a reconstruction plan using the local library.
5. `MCP_Server/sample_manager.py`, `sample_selector.py`, `audio_analyzer.py`
Sample indexing, search, scoring, and analysis.
6. `MaxForLive/`
`.amxd` devices for the hybrid M4L path.
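For orientation, here is a minimal sketch of how an external client can talk to the Remote Script socket. The JSON request/response shape and the default port 9877 come from the Remote Script in this repo; the helper name, timeout, and buffer size are illustrative:

```python
import json
import socket

def send_command(cmd_type, params=None, host="127.0.0.1", port=9877):
    # One JSON command per connection; the Remote Script replies with one JSON object.
    with socket.create_connection((host, port), timeout=15.0) as sock:
        sock.sendall(json.dumps({"type": cmd_type, "params": params or {}}).encode("utf-8"))
        data = sock.recv(8192)  # responses here are small JSON payloads
    return json.loads(data.decode("utf-8"))

info = send_command("get_session_info")
print(info["result"]["tempo"], info["result"]["num_tracks"])
```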
## Repo layout
```text
AbletonMCP_AI/
|-- __init__.py
|-- Remote_Script.py
|-- start_server.bat
|-- .mcp.json
|-- README.md
|-- CLAUDE.md
|-- MaxForLive/
| |-- AbletonMCP_Engine.amxd
| |-- AbletonMCP_Engine.maxpat
| `-- AbletonMCP_SamplerPro.amxd
|-- MCP_Server/
| |-- server.py
| |-- song_generator.py
| |-- reference_listener.py
| |-- audio_analyzer.py
| |-- sample_manager.py
| |-- sample_selector.py
| |-- sample_index.py
| |-- socket_smoke_test.py
| |-- template_analyzer.py
| |-- ABLETUNES_TEMPLATE_NOTES.md
| `-- requirements.txt
`-- docs/
|-- AI_HANDOFF.md
|-- ARCHITECTURE.md
|-- GPU_DIRECTML.md
|-- MCP_TOOLS.md
|-- PROJECT_CONTEXT.md
|-- REMOTE_PROTOCOL.md
`-- SETUP_WINDOWS.md
```
## Documentation
Read first:
- [CLAUDE.md](CLAUDE.md) - broad handoff, full chronology, actual status, paths, and operational notes
- [AI_HANDOFF](docs/AI_HANDOFF.md) - short, operational handoff
- [PROJECT_CONTEXT](docs/PROJECT_CONTEXT.md) - product direction and lessons learned
- [Architecture](docs/ARCHITECTURE.md)
- [Windows + Ableton setup](docs/SETUP_WINDOWS.md)
- [MCP tools](docs/MCP_TOOLS.md)
- [Remote Script protocol](docs/REMOTE_PROTOCOL.md)
- [GPU DirectML](docs/GPU_DIRECTML.md)
- [Abletunes template analysis notes](MCP_Server/ABLETUNES_TEMPLATE_NOTES.md)
## Quick start
### 1. Clone and place it in the Ableton folder
```powershell
# Clone the repo
git clone https://gitea.cbcren.online/renato97/ableton-mcp-ai.git
# Copy into the MIDI Remote Scripts folder
cp -r ableton-mcp-ai "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI"
```
### 2. Install the Python dependencies
```powershell
cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server"
python -m pip install -r requirements.txt
```
### 3. Select the Control Surface in Live
- Open Ableton Live 12.
- Go to `Preferences > Link/Tempo/MIDI`.
- Choose `AbletonMCP_AI` as the `Control Surface`.
### 4. Start the MCP server
```powershell
cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI"
python MCP_Server/server.py
```
Or:
```powershell
start_server.bat
```
### 5. Test the connection
```powershell
cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server"
python socket_smoke_test.py
```
## Expected external locations
This repo does not include heavy libraries or generated material. The stack expects these resources outside the repo:
- main sample library (organized by category):
`..\librerias\organized_samples` (827 indexed samples)
- original raw library:
`..\librerias\all_tracks`
- vector store for ML matching:
`..\librerias\vector_store`
- Ableton User Library, where the M4L sampler is installed:
`%USERPROFILE%\Documents\Ableton\User Library`
- MP3/WAV references to analyze:
`..\sample`
- `.als` projects, renders, and stems
## Recommended workflow
1. Reset the set.
2. Generate a track from MCP or over the socket.
3. Verify the commit ends up in Arrangement View.
4. Review the `AUDIO ...` audio tracks and returns.
5. Tune profiles, matching, and snapshots.
## Useful commands
Full generation:
```text
generate_track(genre="tech-house", style="latin-industrial", bpm=0, key="", structure="standard")
generate_track(genre="tech-house", style="groovy", bpm=126, key="Am", structure="tech-house-dj")
generate_song(genre="tech-house", style="latin-industrial", bpm=0, key="", structure="club")
```
DJ / Harmonic mixing:
```text
get_harmonic_keys(key="Am")
get_compatible_keys(key="Am")
export_stems_config()
discover_reference_track()
get_reference_suggestions()
```
Generation utilities:
```text
preview_generation(genre="tech-house", style="groovy", bpm=126, key="Am", structure="tech-house-dj")
regenerate_section(section_name="DROP A")
get_generation_history()
```
Transport:
```text
start_playback()
stop_playback()
set_tempo(126)
```
Samples:
```text
search_samples("kick", category="kick", limit=10)
advanced_search_samples(query="vocal", category="vocals", bpm=128, key="F#m")
analyze_audio_file("C:\\ruta\\track.mp3")
```
## License
No license published yet. Treat this repo as private/internal until one is defined.

View File

@@ -1,943 +0,0 @@
"""
AbletonMCP AI - Remote Script for Ableton Live 12
Full MCP integration for AI-driven music generation
This script must be copied to:
C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\
Then selected under Preferences > Link/Tempo/MIDI > Control Surface
"""
from __future__ import absolute_import, print_function, unicode_literals
from _Framework.ControlSurface import ControlSurface
import socket
import json
import threading
import time
import traceback
import os
import hashlib
# Python 2/3 compatibility
try:
import queue
except ImportError:
pass
try:
string_types = basestring
except NameError:
string_types = str
# Configuration
DEFAULT_PORT = 9877
HOST = "localhost"
CONFIG_FILE = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\track_config.json"
def create_instance(c_instance):
"""Creates and returns the script instance"""
return AbletonMCP_AI(c_instance)
class AbletonMCP_AI(ControlSurface):
"""
Remote Script for MCP + AI integration with Ableton Live 12
Features:
- Socket server for communication with the MCP Server
- MIDI track generation with automatic patterns
- Sample loading via the browser
- Integration with AI audio analysis
"""
def __init__(self, c_instance):
ControlSurface.__init__(self, c_instance)
self.log_message("=" * 60)
self.log_message("AbletonMCP AI - Initializing...")
self.log_message("=" * 60)
# Reference to the song
self._song = self.song()
# Socket server
self.server = None
self.client_threads = []
self.server_thread = None
self.running = False
# Config watcher for automatic generation
self._last_config_hash = None
self._config_watcher_thread = None
self._config_watcher_running = False
# Start the server
self.start_server()
# Start the config watcher
self.start_config_watcher()
self.log_message("AbletonMCP AI initialized successfully")
self.show_message("AbletonMCP AI: Ready on port " + str(DEFAULT_PORT))
def disconnect(self):
"""Called when Ableton closes or the script is removed"""
self.log_message("AbletonMCP AI disconnecting...")
self.running = False
self._config_watcher_running = False
# Stop the server
if self.server:
try:
self.server.close()
except Exception:
pass
# Wait for threads
if self.server_thread and self.server_thread.is_alive():
self.server_thread.join(1.0)
if self._config_watcher_thread and self._config_watcher_thread.is_alive():
self._config_watcher_thread.join(0.5)
ControlSurface.disconnect(self)
self.log_message("AbletonMCP AI disconnected")
# =========================================================================
# SOCKET SERVER
# =========================================================================
def start_server(self):
"""Starts the socket server on a separate thread"""
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((HOST, DEFAULT_PORT))
self.server.listen(5)
self.running = True
self.server_thread = threading.Thread(target=self._server_thread)
self.server_thread.daemon = True
self.server_thread.start()
self.log_message("Socket server started on port " + str(DEFAULT_PORT))
except Exception as e:
self.log_message("Error starting server: " + str(e))
self.show_message("AbletonMCP AI Error: " + str(e))
def _server_thread(self):
"""Main server thread - handles connections"""
try:
self.server.settimeout(1.0)
while self.running:
try:
client, address = self.server.accept()
self.log_message("Connection accepted from " + str(address))
# Handle the client on a separate thread
client_thread = threading.Thread(
target=self._handle_client,
args=(client,)
)
client_thread.daemon = True
client_thread.start()
self.client_threads.append(client_thread)
# Clean up finished threads
self.client_threads = [t for t in self.client_threads if t.is_alive()]
except socket.timeout:
continue
except Exception as e:
if self.running:
self.log_message("Server error: " + str(e))
time.sleep(0.5)
except Exception as e:
self.log_message("Server thread error: " + str(e))
def _handle_client(self, client):
"""Handles communication with a connected client"""
client.settimeout(None)
buffer = ''
try:
while self.running:
try:
data = client.recv(8192)
if not data:
self.log_message("Client disconnected")
break
# Accumulate into the buffer
try:
buffer += data.decode('utf-8')
except AttributeError:
buffer += data
# Try to parse JSON
try:
command = json.loads(buffer)
buffer = ''
self.log_message("Command received: " + str(command.get("type", "unknown")))
# Process the command
response = self._process_command(command)
# Send the response
try:
client.sendall(json.dumps(response).encode('utf-8'))
except AttributeError:
client.sendall(json.dumps(response))
except ValueError:
# Incomplete data, wait for more
continue
except Exception as e:
self.log_message("Error handling client: " + str(e))
error_response = {"status": "error", "message": str(e)}
try:
client.sendall(json.dumps(error_response).encode('utf-8'))
except Exception:
pass
break
finally:
try:
client.close()
except Exception:
pass
# =========================================================================
# CONFIG WATCHER - Automatic generation
# =========================================================================
def start_config_watcher(self):
"""Starts the config watcher for automatic generation"""
self._config_watcher_running = True
self._config_watcher_thread = threading.Thread(target=self._config_watcher_loop)
self._config_watcher_thread.daemon = True
self._config_watcher_thread.start()
self.log_message("Config watcher started")
def _config_watcher_loop(self):
"""Loop that monitors the configuration file for changes"""
while self._config_watcher_running:
try:
if os.path.exists(CONFIG_FILE):
with open(CONFIG_FILE, 'r') as f:
content = f.read()
h = hashlib.md5(content.encode()).hexdigest()
if h != self._last_config_hash:
self._last_config_hash = h
self.log_message("Config changed - generating track...")
try:
config = json.loads(content)
# Only process if the 'auto_generate' flag is set
if config.get('auto_generate', False):
self._generate_from_config(config)
except Exception as e:
self.log_message("Error generating from config: " + str(e))
self.log_message(traceback.format_exc())
time.sleep(1.0)  # Check every second
except Exception as e:
self.log_message("Error in config watcher: " + str(e))
time.sleep(2.0)
def _generate_from_config(self, config):
"""Generates a complete track from a configuration"""
try:
self.show_message("AI: Generating " + config.get('name', 'Track'))
# 1. Clear the existing project
self._clear_all_tracks()
# 2. Set the BPM
bpm = config.get('bpm', 128)
self._song.tempo = bpm
# 3. Create tracks from the configuration
tracks_config = config.get('tracks', [])
for idx, track_cfg in enumerate(tracks_config):
track_type = track_cfg.get('type', 'midi')
name = track_cfg.get('name', 'Track ' + str(idx))
if track_type == 'midi':
self._song.create_midi_track(idx)
elif track_type == 'audio':
self._song.create_audio_track(idx)
track = self._song.tracks[idx]
track.name = name
# Set the color if provided
if 'color' in track_cfg:
track.color = track_cfg['color']
# Create a clip with notes if configured
if 'clip' in track_cfg:
clip_cfg = track_cfg['clip']
slot_idx = clip_cfg.get('slot', 0)
length = clip_cfg.get('length', 4.0)
# Ensure enough scenes exist
while len(self._song.scenes) <= slot_idx:
self._song.create_scene(-1)
clip_slot = track.clip_slots[slot_idx]
clip_slot.create_clip(length)
# Add the notes
if 'notes' in clip_cfg:
clip = clip_slot.clip
for note in clip_cfg['notes']:
pitch = note.get('pitch', 60)
start = note.get('start', 0.0)
duration = note.get('duration', 0.25)
velocity = note.get('velocity', 100)
clip.add_new_note((pitch, start, duration, velocity, False))
# Load an instrument if specified
if 'instrument' in track_cfg:
instrument_name = track_cfg['instrument']
# Use the browser to load it
self._load_instrument_by_name(track, instrument_name)
self.show_message("AI: Track generated successfully!")
self.log_message("Generation complete: " + str(len(tracks_config)) + " tracks")
except Exception as e:
self.log_message("Generation error: " + str(e))
self.log_message(traceback.format_exc())
self.show_message("AI Error: " + str(e))
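# Example track_config.json consumed by the watcher above (a sketch: the field
# names come from _generate_from_config, the concrete values are illustrative):
# {
#   "auto_generate": true,
#   "name": "Demo Track",
#   "bpm": 126,
#   "tracks": [
#     {"type": "midi", "name": "KICK", "color": 16,
#      "clip": {"slot": 0, "length": 4.0,
#               "notes": [{"pitch": 36, "start": 0.0, "duration": 0.25, "velocity": 100}]},
#      "instrument": "Drum Rack"}
#   ]
# }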
def _clear_all_tracks(self):
"""Deletes all existing tracks"""
try:
while len(self._song.tracks) > 0:
self._song.delete_track(len(self._song.tracks) - 1)
except Exception as e:
self.log_message("Error clearing tracks: " + str(e))
def _load_instrument_by_name(self, track, name):
"""Loads an instrument onto the track by name"""
try:
browser = self.application().browser
# Search the instrument categories
if hasattr(browser, 'instruments'):
for item in self._search_browser_items(browser.instruments, name):
try:
browser.load_item(item)
self.log_message("Instrument loaded: " + name)
return True
except Exception as e:
self.log_message("Error loading instrument: " + str(e))
return False
except Exception as e:
self.log_message("Error searching for instrument: " + str(e))
return False
def _search_browser_items(self, root, name, depth=0, max_depth=5):
"""Recursively searches browser items"""
if depth > max_depth or root is None:
return []
results = []
try:
# Check whether the name matches
item_name = getattr(root, 'name', '').lower()
if name.lower() in item_name or item_name in name.lower():
results.append(root)
# Buscar en hijos
if hasattr(root, 'children'):
for child in root.children:
results.extend(self._search_browser_items(child, name, depth + 1, max_depth))
except Exception:
pass
return results
# =========================================================================
# COMMAND PROCESSING
# =========================================================================
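# Wire format handled below, as seen in _handle_client: one JSON object in,
# one JSON object out. For example (values illustrative):
#   request:  {"type": "set_tempo", "params": {"tempo": 126.0}}
#   response: {"status": "success", "result": {"tempo": 126.0}}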
def _process_command(self, command):
"""Processes a received command and returns the response"""
command_type = command.get("type", "")
params = command.get("params", {})
try:
# Info commands
if command_type == "get_session_info":
return self._cmd_get_session_info()
elif command_type == "get_track_info":
return self._cmd_get_track_info(params)
elif command_type == "get_tracks":
return self._cmd_get_tracks()
# Track commands
elif command_type == "create_midi_track":
return self._cmd_create_midi_track(params)
elif command_type == "create_audio_track":
return self._cmd_create_audio_track(params)
elif command_type == "set_track_name":
return self._cmd_set_track_name(params)
elif command_type == "set_track_volume":
return self._cmd_set_track_volume(params)
elif command_type == "set_track_pan":
return self._cmd_set_track_pan(params)
elif command_type == "set_track_mute":
return self._cmd_set_track_mute(params)
elif command_type == "set_track_solo":
return self._cmd_set_track_solo(params)
elif command_type == "set_track_color":
return self._cmd_set_track_color(params)
# Clip commands
elif command_type == "create_clip":
return self._cmd_create_clip(params)
elif command_type == "add_notes_to_clip":
return self._cmd_add_notes_to_clip(params)
elif command_type == "set_clip_name":
return self._cmd_set_clip_name(params)
elif command_type == "set_clip_envelope":
return self._cmd_set_clip_envelope(params)
elif command_type == "fire_clip":
return self._cmd_fire_clip(params)
elif command_type == "stop_clip":
return self._cmd_stop_clip(params)
# Transport commands
elif command_type == "set_tempo":
return self._cmd_set_tempo(params)
elif command_type == "start_playback":
return self._cmd_start_playback()
elif command_type == "stop_playback":
return self._cmd_stop_playback()
# Scene commands
elif command_type == "create_scene":
return self._cmd_create_scene(params)
elif command_type == "set_scene_name":
return self._cmd_set_scene_name(params)
elif command_type == "fire_scene":
return self._cmd_fire_scene(params)
# Device commands
elif command_type == "load_instrument_or_effect":
return self._cmd_load_instrument(params)
elif command_type == "set_device_parameter":
return self._cmd_set_device_parameter(params)
# AI generation command
elif command_type == "generate_track":
return self._cmd_generate_track(params)
else:
return {"status": "error", "message": "Unknown command: " + command_type}
except Exception as e:
self.log_message("Error processing command " + command_type + ": " + str(e))
self.log_message(traceback.format_exc())
return {"status": "error", "message": str(e)}
# =========================================================================
# COMMAND IMPLEMENTATIONS
# =========================================================================
def _cmd_get_session_info(self):
"""Returns information about the current session"""
return {
"status": "success",
"result": {
"tempo": self._song.tempo,
"signature_numerator": self._song.signature_numerator,
"signature_denominator": self._song.signature_denominator,
"is_playing": self._song.is_playing,
"current_song_time": self._song.current_song_time,
"loop_start": self._song.loop_start,
"loop_length": self._song.loop_length,
"num_tracks": len(self._song.tracks),
"num_scenes": len(self._song.scenes),
"num_return_tracks": len(self._song.return_tracks)
}
}
def _cmd_get_track_info(self, params):
"""Returns information about a specific track"""
idx = params.get("track_index", 0)
if idx < 0 or idx >= len(self._song.tracks):
return {"status": "error", "message": "Track index out of range"}
track = self._song.tracks[idx]
# Determine the track type
track_type = "unknown"
if track.has_midi_input:
track_type = "midi"
elif track.has_audio_input:
track_type = "audio"
return {
"status": "success",
"result": {
"index": idx,
"name": track.name,
"type": track_type,
"color": track.color,
"mute": track.mute,
"solo": track.solo,
"arm": track.arm,
"volume": track.mixer_device.volume.value if track.mixer_device else 0.85,
"pan": track.mixer_device.panning.value if track.mixer_device else 0.0,
"num_clips": len(track.clip_slots),
"num_devices": len(track.devices)
}
}
def _cmd_get_tracks(self):
"""Returns a list of all tracks"""
tracks = []
for i, track in enumerate(self._song.tracks):
track_type = "midi" if track.has_midi_input else "audio" if track.has_audio_input else "unknown"
tracks.append({
"index": i,
"name": track.name,
"type": track_type,
"color": track.color,
"mute": track.mute,
"solo": track.solo
})
return {"status": "success", "result": tracks}
def _cmd_create_midi_track(self, params):
"""Creates a MIDI track"""
index = params.get("index", -1)
self._song.create_midi_track(index)
return {"status": "success", "result": {"message": "MIDI track created", "index": index}}
def _cmd_create_audio_track(self, params):
"""Creates an audio track"""
index = params.get("index", -1)
self._song.create_audio_track(index)
return {"status": "success", "result": {"message": "Audio track created", "index": index}}
def _cmd_set_track_name(self, params):
"""Sets a track's name"""
idx = params.get("track_index", 0)
name = params.get("name", "Track")
self._song.tracks[idx].name = name
return {"status": "success", "result": {"message": "Name updated", "name": name}}
def _cmd_set_track_volume(self, params):
"""Setea el volumen de un track"""
idx = params.get("track_index", 0)
volume = params.get("volume", 0.85)
track = self._song.tracks[idx]
if track.mixer_device and track.mixer_device.volume:
track.mixer_device.volume.value = volume
return {"status": "success"}
def _cmd_set_track_pan(self, params):
"""Setea el pan de un track"""
idx = params.get("track_index", 0)
pan = params.get("pan", 0.0)
track = self._song.tracks[idx]
if track.mixer_device and track.mixer_device.panning:
track.mixer_device.panning.value = pan
return {"status": "success"}
def _cmd_set_track_mute(self, params):
"""Setea el mute de un track"""
idx = params.get("track_index", 0)
mute = params.get("mute", True)
track = self._song.tracks[idx]
current_mute = track.mute
if current_mute != mute:
track.mute = mute
return {"status": "success", "result": {"mute": track.mute, "track_index": idx}}
def _cmd_set_track_solo(self, params):
"""Setea el solo de un track"""
idx = params.get("track_index", 0)
solo = params.get("solo", True)
self._song.tracks[idx].solo = solo
return {"status": "success"}
def _cmd_set_track_color(self, params):
"""Setea el color de un track"""
idx = params.get("track_index", 0)
color = params.get("color", 0)
self._song.tracks[idx].color = color
return {"status": "success"}
def _cmd_create_clip(self, params):
"""Crea un clip en un slot"""
track_idx = params.get("track_index", 0)
clip_idx = params.get("clip_index", 0)
length = params.get("length", 4.0)
track = self._song.tracks[track_idx]
# Make sure enough scenes exist
while len(self._song.scenes) <= clip_idx:
self._song.create_scene(-1)
clip_slot = track.clip_slots[clip_idx]
clip_slot.create_clip(length)
return {"status": "success", "result": {"message": "Clip creado"}}
def _cmd_add_notes_to_clip(self, params):
"""Agrega notas a un clip MIDI"""
track_idx = params.get("track_index", 0)
clip_idx = params.get("clip_index", 0)
notes = params.get("notes", [])
track = self._song.tracks[track_idx]
clip_slot = track.clip_slots[clip_idx]
if not clip_slot.has_clip:
return {"status": "error", "message": "No hay clip en este slot"}
clip = clip_slot.clip
for note in notes:
pitch = note.get("pitch", 60)
start = note.get("start", 0.0)
duration = note.get("duration", 0.25)
velocity = note.get("velocity", 100)
clip.add_new_note((pitch, start, duration, velocity, False))
return {"status": "success", "result": {"num_notes_added": len(notes)}}
def _cmd_set_clip_name(self, params):
"""Setea el nombre de un clip"""
track_idx = params.get("track_index", 0)
clip_idx = params.get("clip_index", 0)
name = params.get("name", "Clip")
clip_slot = self._song.tracks[track_idx].clip_slots[clip_idx]
if clip_slot.has_clip:
clip_slot.clip.name = name
return {"status": "success"}
def _cmd_fire_clip(self, params):
"""Dispara un clip"""
track_idx = params.get("track_index", 0)
clip_idx = params.get("clip_index", 0)
clip_slot = self._song.tracks[track_idx].clip_slots[clip_idx]
clip_slot.fire()
return {"status": "success"}
def _cmd_stop_clip(self, params):
"""Detiene un clip"""
track_idx = params.get("track_index", 0)
clip_idx = params.get("clip_index", 0)
clip_slot = self._song.tracks[track_idx].clip_slots[clip_idx]
clip_slot.stop()
return {"status": "success"}
def _cmd_set_tempo(self, params):
"""Setea el BPM"""
tempo = params.get("tempo", 120.0)
self._song.tempo = tempo
return {"status": "success", "result": {"tempo": tempo}}
def _cmd_start_playback(self):
"""Inicia reproducción"""
self._song.start_playing()
return {"status": "success"}
def _cmd_stop_playback(self):
"""Detiene reproducción"""
self._song.stop_playing()
return {"status": "success"}
def _cmd_create_scene(self, params):
"""Crea una scene"""
index = params.get("index", -1)
self._song.create_scene(index)
return {"status": "success"}
def _cmd_set_scene_name(self, params):
"""Setea el nombre de una scene"""
idx = params.get("scene_index", 0)
name = params.get("name", "Scene")
self._song.scenes[idx].name = name
return {"status": "success"}
def _cmd_fire_scene(self, params):
"""Dispara una scene"""
idx = params.get("scene_index", 0)
scene = self._song.scenes[idx]
scene.fire()
if not self._song.is_playing:
self._song.start_playing()
return {"status": "success"}
def _cmd_load_instrument(self, params):
"""Carga un instrumento en un track"""
track_idx = params.get("track_index", 0)
name = params.get("name", "")
track = self._song.tracks[track_idx]
success = self._load_instrument_by_name(track, name)
if success:
return {"status": "success", "result": {"message": "Instrumento cargado"}}
else:
return {"status": "error", "message": "No se pudo cargar el instrumento"}
def _cmd_set_device_parameter(self, params):
"""Setea un parámetro de dispositivo"""
track_idx = params.get("track_index", 0)
device_idx = params.get("device_index", 0)
param_idx = params.get("parameter_index", 0)
value = params.get("value", 0.0)
track = self._song.tracks[track_idx]
device = track.devices[device_idx]
param = device.parameters[param_idx]
param.value = value
return {"status": "success"}
def _cmd_generate_track(self, params):
"""Comando principal de generación de tracks"""
# Este comando delega a _generate_from_config
# pero puede ser llamado directamente vía socket
try:
self._generate_from_config(params)
return {"status": "success", "result": {"message": "Track generado exitosamente"}}
except Exception as e:
return {"status": "error", "message": str(e)}
def _cmd_set_clip_envelope(self, params):
"""Setea un envelope (volume, pan, send) en un clip con puntos de automatización"""
track_idx = params.get("track_index", 0)
clip_idx = params.get("clip_index", 0)
envelope_name = params.get("envelope", "volume") # volume, pan, send
points = params.get("points", [])
track = self._song.tracks[track_idx]
clip_slot = track.clip_slots[clip_idx]
if not clip_slot.has_clip:
return {"status": "error", "message": "No hay clip en este slot"}
clip = clip_slot.clip
# Get the right envelope
if envelope_name == "volume":
envelope = clip.volume_envelope
elif envelope_name == "pan":
envelope = clip.pan_envelope
elif envelope_name == "send":
send_idx = params.get("send_index", 0)
if send_idx < len(track.mixer_device.sends):
envelope = track.mixer_device.sends[send_idx].envelope
else:
return {"status": "error", "message": "Send index fuera de rango"}
else:
return {"status": "error", "message": "Envelope type desconocido: " + envelope_name}
# Limpiar puntos existentes si se especifica
clear_existing = params.get("clear_existing", False)
if clear_existing:
while len(envelope.points) > 0:
envelope.delete_point(len(envelope.points) - 1)
# Add automation points from the points array
if points:
for point in points:
if isinstance(point, dict):
time_pos = point.get("time", 0.0)
value = point.get("value", 0.0)
envelope.add_new_point(time_pos, value)
return {"status": "success", "result": {"message": "Envelope seteado con puntos", "points_added": len(points)}}
else:
return {"status": "error", "message": "No se especificaron puntos de automatización"}
def _cmd_calibrate_track_gain(self, params):
"""Calibra el gain de un track basado en loudness"""
track_idx = params.get("track_index", 0)
target_loudness = params.get("target_loudness", -14.0) # LUFS target
measurement_window = params.get("measurement_window", 0.1) # segundos
track = self._song.tracks[track_idx]
if not track.has_audio_input:
return {"status": "error", "message": "Track no es de audio"}
# Obtener el peak volume actual
current_volume = track.mixer_device.volume.value
# Calibrar para alcanzar el target (simplificado)
# En una implementación real, usaríamos análisis de loudness real
# Por ahora, ajustamos proporcionalmente
adjustment = target_loudness / -20.0 # Aproximación
new_volume = max(0.0, min(1.0, current_volume * adjustment))
track.mixer_device.volume.value = new_volume
return {
"status": "success",
"result": {
"message": "Gain calibrado",
"current_volume": current_volume,
"new_volume": new_volume,
"target_loudness": target_loudness
}
}
def _cmd_apply_compression(self, params):
"""Aplica compresión a un track"""
track_idx = params.get("track_index", 0)
threshold = params.get("threshold", -24.0)
ratio = params.get("ratio", 4.0)
attack = params.get("attack", 0.01)
release = params.get("release", 0.1)
track = self._song.tracks[track_idx]
# Find or create a compressor
compressor = None
for device in track.devices:
if device.name == "Compressor":
compressor = device
break
if compressor is None:
# Try to load a Compressor from the browser
browser = self.application().browser
for item in self._search_browser_items(browser.effects, "Compressor"):
try:
browser.load_item(item)
compressor = track.devices[-1]
break
except Exception:
pass
if compressor:
# Set parameters (indices may vary by Live version)
try:
if len(compressor.parameters) > 0:
compressor.parameters[0].value = threshold # Threshold
if len(compressor.parameters) > 1:
compressor.parameters[1].value = ratio # Ratio
if len(compressor.parameters) > 2:
compressor.parameters[2].value = attack # Attack
if len(compressor.parameters) > 3:
compressor.parameters[3].value = release # Release
except Exception:
pass
return {"status": "success", "result": {"message": "Compresor aplicado"}}
else:
return {"status": "error", "message": "No se pudo cargar compresor"}
def _cmd_apply_limiting(self, params):
"""Aplica limiting para loudness normalization"""
track_idx = params.get("track_index", 0)
target_loudness = params.get("target_loudness", -1.0) # LUFS para master
lookahead = params.get("lookahead", 0.01)
release = params.get("release", 0.05)
track = self._song.tracks[track_idx]
# Find or create a limiter
limiter = None
for device in track.devices:
if "Limiter" in device.name:
limiter = device
break
if limiter is None:
# Try to load a Limiter from the browser
browser = self.application().browser
for item in self._search_browser_items(browser.effects, "Limiter"):
try:
browser.load_item(item)
limiter = track.devices[-1]
break
except Exception:
pass
if limiter:
# Set parameters
try:
if len(limiter.parameters) > 0:
limiter.parameters[0].value = target_loudness # Gain
if len(limiter.parameters) > 1:
limiter.parameters[1].value = lookahead # Lookahead
if len(limiter.parameters) > 2:
limiter.parameters[2].value = release # Release
except Exception:
pass
return {"status": "success", "result": {"message": "Limiter aplicado"}}
else:
return {"status": "error", "message": "No se pudo cargar limiter"}
def _cmd_master_loudness_normalization(self, params):
"""Normaliza el loudness del master track"""
track_idx = params.get("track_index", 0)
target_loudness = params.get("target_loudness", -14.0)
track = self._song.tracks[track_idx]
# Calibrate gain
current_volume = track.mixer_device.volume.value
adjustment = 10 ** ((target_loudness - (-14)) / 20) # Approximation
new_volume = max(0.0, min(1.0, current_volume * adjustment))
track.mixer_device.volume.value = new_volume
return {
"status": "success",
"result": {
"message": "Loudness normalizado",
"target_loudness": target_loudness,
"new_volume": new_volume
}
}

View File

@@ -1,14 +0,0 @@
{
"implementer": {
"description": "Implements the requested code changes with minimal diff.",
"prompt": "You are a focused implementation worker. Make the requested code changes, keep the diff small, and do not overclaim."
},
"verifier": {
"description": "Runs validations and checks whether the claimed work is actually complete.",
"prompt": "You are a strict verifier. Run the requested validations, compare code against claims, and report gaps clearly."
},
"reporter": {
"description": "Writes the final worker report truthfully.",
"prompt": "You are a truthful technical reporter. Summarize only what was actually changed and verified."
}
}

View File

@@ -1,30 +0,0 @@
{
"planner": {
"description": "Breaks the task into a small, realistic execution plan and identifies the critical path.",
"prompt": "You are the planning agent. Read the task, identify the minimum safe plan, and tell the team what to implement first. Keep the plan concrete and short."
},
"implementer_core": {
"description": "Implements the main code changes with a minimal diff.",
"prompt": "You are the core implementation agent. Make the requested code changes with the smallest coherent diff. Do not overclaim."
},
"implementer_aux": {
"description": "Implements helper scripts, manifests, reports, and offline tooling.",
"prompt": "You are the auxiliary implementation agent. Focus on CLI helpers, manifests, reports, and utility scripts. Keep changes isolated."
},
"validator": {
"description": "Runs validations and checks whether the implementation actually works.",
"prompt": "You are the validation agent. Run the required validations, inspect failures carefully, and report only what really passed."
},
"retrieval_reviewer": {
"description": "Reviews retrieval/indexing logic for role contamination, cache compatibility, and data-shape issues.",
"prompt": "You are the retrieval reviewer. Inspect role safety, cache compatibility, manifests, and offline retrieval quality. Flag contamination and schema mismatches."
},
"runtime_guard": {
"description": "Protects the Ableton runtime and blocks risky unrelated changes.",
"prompt": "You are the runtime guard. Prevent unnecessary edits to the Remote Script, runtime socket behavior, or generation path when the task does not require it."
},
"reporter": {
"description": "Writes the final task report truthfully and concisely.",
"prompt": "You are the reporting agent. Write a technical report that only claims what was truly changed and verified."
}
}

View File

@@ -1,94 +0,0 @@
param(
[Parameter(Mandatory = $true)]
[string]$TaskFile,
[Parameter(Mandatory = $true)]
[string]$ReportFile,
[Parameter(Mandatory = $true)]
[string]$ProjectRoot,
[Parameter(Mandatory = $true)]
[string]$OutputFile,
[string]$CodexModel = ""
)
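# Example invocation (paths illustrative):
#   .\invoke_codex_review.ps1 -TaskFile task.md -ReportFile report.md -ProjectRoot . -OutputFile codex_note.txt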
$ErrorActionPreference = "Stop"
function Resolve-CodexCommand() {
$cmd = Get-Command "codex.cmd" -ErrorAction SilentlyContinue
if ($cmd) {
return $cmd.Source
}
$fallback = Get-Command "codex" -ErrorAction SilentlyContinue
if ($fallback) {
return $fallback.Source
}
throw "Command not found: codex"
}
$taskPath = (Resolve-Path -LiteralPath $TaskFile).Path
$reportPath = (Resolve-Path -LiteralPath $ReportFile).Path
$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path
$outputPath = [System.IO.Path]::GetFullPath($OutputFile)
$codexCommand = Resolve-CodexCommand
$reviewPrompt = @"
Read this worker task file:
$taskPath
Read this GLM report:
$reportPath
Your job:
1. Inspect the real diff in the repository.
2. Verify whether GLM actually implemented what the report claims.
3. Fix anything incorrect, incomplete, or unsafe.
4. Run the relevant validations mentioned by the task/report.
5. Leave the repository in the best corrected state you can reach in one pass.
6. Write a concise final summary to the output file configured by the CLI.
Be strict about overclaims. The code is the source of truth, not the report.
"@
$codexArgs = @(
"exec",
"--dangerously-bypass-approvals-and-sandbox",
"-C", $projectPath,
"-o", $outputPath
)
if (-not [string]::IsNullOrWhiteSpace($CodexModel)) {
$codexArgs += @("-m", $CodexModel)
}
$codexArgs += $reviewPrompt
$stdoutPath = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($outputPath), "codex_review_stdout.tmp.txt")
$stderrPath = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($outputPath), "codex_review_stderr.tmp.txt")
if (Test-Path -LiteralPath $stdoutPath) { Remove-Item -LiteralPath $stdoutPath -Force }
if (Test-Path -LiteralPath $stderrPath) { Remove-Item -LiteralPath $stderrPath -Force }
Push-Location $projectPath
try {
& $codexCommand @codexArgs 1> $stdoutPath 2> $stderrPath
$exitCode = $LASTEXITCODE
}
finally {
Pop-Location
}
if (Test-Path -LiteralPath $stdoutPath) {
Get-Content -LiteralPath $stdoutPath
}
if (Test-Path -LiteralPath $stderrPath) {
Get-Content -LiteralPath $stderrPath
}
if ($exitCode -ne 0) {
throw "Codex exited with code $exitCode"
}

View File

@@ -1,401 +0,0 @@
{
"benchmark_info": {
"library_dir": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks",
"top_n": 3,
"roles": [
"kick",
"snare",
"hat",
"bass_loop",
"vocal_loop",
"top_loop"
],
"timestamp": "2026-03-20T16:36:16",
"device": "directml"
},
"references": [
{
"file_name": "Mr. Pauer, Goyo - Química (Video Oficial).mp3",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\sample\\Mr. Pauer, Goyo - Química (Video Oficial).mp3",
"analysis_time_seconds": 3.09,
"reference_info": {
"tempo": 123.047,
"key": "Cm",
"duration": 145.31,
"rms_mean": 0.17201,
"onset_mean": 1.956218,
"spectral_centroid": 2465.478
},
"sections": [
{
"kind": "verse",
"start": 0.0,
"end": 14.954,
"bars": 8
},
{
"kind": "build",
"start": 14.954,
"end": 37.779,
"bars": 12
},
{
"kind": "verse",
"start": 37.779,
"end": 46.811,
"bars": 5
},
{
"kind": "verse",
"start": 46.811,
"end": 54.822,
"bars": 4
},
{
"kind": "drop",
"start": 54.822,
"end": 62.833,
"bars": 4
},
{
"kind": "build",
"start": 62.833,
"end": 70.844,
"bars": 4
},
{
"kind": "verse",
"start": 70.844,
"end": 92.415,
"bars": 11
},
{
"kind": "build",
"start": 92.415,
"end": 101.03,
"bars": 4
},
{
"kind": "verse",
"start": 101.03,
"end": 109.041,
"bars": 4
},
{
"kind": "build",
"start": 109.041,
"end": 117.098,
"bars": 4
},
{
"kind": "outro",
"start": 117.098,
"end": 125.109,
"bars": 4
},
{
"kind": "outro",
"start": 125.109,
"end": 133.422,
"bars": 4
},
{
"kind": "outro",
"start": 133.422,
"end": 141.433,
"bars": 4
}
],
"role_candidates": {
"kick": {
"total_available": 16,
"top_candidates": [
{
"rank": 1,
"file_name": "BBH - Primer Impacto - Kick 5.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 5.wav",
"score": 0.658173,
"cosine": 0.677478,
"segment_score": 0.807539,
"catalog_score": 0.540981,
"tempo": 117.454,
"key": "Gm",
"duration": 0.5
},
{
"rank": 2,
"file_name": "BBH - Primer Impacto - Kick 1.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 1.wav",
"score": 0.650067,
"cosine": 0.633787,
"segment_score": 0.771427,
"catalog_score": 0.540981,
"tempo": 117.454,
"key": "Am",
"duration": 0.5
},
{
"rank": 3,
"file_name": "BBH - Primer Impacto - Kick 8.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 8.wav",
"score": 0.642297,
"cosine": 0.689128,
"segment_score": 0.809562,
"catalog_score": 0.5,
"tempo": 258.398,
"key": "Fm",
"duration": 0.484
}
]
},
"snare": {
"total_available": 28,
"top_candidates": [
{
"rank": 1,
"file_name": "MT Clap & Snare Hit 05.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 05.wav",
"score": 0.642515,
"cosine": 0.742869,
"segment_score": 0.87862,
"catalog_score": 0.529168,
"tempo": 258.398,
"key": "Dm",
"duration": 0.72
},
{
"rank": 2,
"file_name": "MT Clap & Snare Hit 15.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 15.wav",
"score": 0.623005,
"cosine": 0.754711,
"segment_score": 0.800798,
"catalog_score": 0.518602,
"tempo": 234.908,
"key": "Dm",
"duration": 0.642
},
{
"rank": 3,
"file_name": "BBH - Primer Impacto - Clap 1.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Clap 1.wav",
"score": 0.621014,
"cosine": 0.780775,
"segment_score": 0.805699,
"catalog_score": 0.528549,
"tempo": 117.454,
"key": "A#m",
"duration": 0.545
}
]
},
"hat": {
"total_available": 32,
"top_candidates": [
{
"rank": 1,
"file_name": "BBH - Primer Impacto - Open Hat 2.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 2.wav",
"score": 0.602448,
"cosine": 0.750913,
"segment_score": 0.789455,
"catalog_score": 0.539635,
"tempo": 258.398,
"key": "Cm",
"duration": 0.625
},
{
"rank": 2,
"file_name": "BBH - Primer Impacto - Open Hat 9.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 9.wav",
"score": 0.592739,
"cosine": 0.764186,
"segment_score": 0.682635,
"catalog_score": 0.5,
"tempo": 258.398,
"key": "Gm",
"duration": 0.38
},
{
"rank": 3,
"file_name": "MT Hat Hit 04.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Hat Hit 04.wav",
"score": 0.55811,
"cosine": 0.747485,
"segment_score": 0.747228,
"catalog_score": 0.5,
"tempo": 135.999,
"key": "G",
"duration": 0.233
}
]
},
"bass_loop": {
"total_available": 37,
"top_candidates": [
{
"rank": 1,
"file_name": "Bass_Loop_03_G#m_125.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Bass_Loop_03_G#m_125.wav",
"score": 0.877488,
"cosine": 0.803278,
"segment_score": 0.883592,
"catalog_score": 0.617711,
"tempo": 123.047,
"key": "Cm",
"duration": 7.68
},
{
"rank": 2,
"file_name": "BBH - Primer Impacto - Bass Loop 06 Dmin.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Bass Loop 06 Dmin.wav",
"score": 0.82587,
"cosine": 0.698374,
"segment_score": 0.799662,
"catalog_score": 0.890835,
"tempo": 123.047,
"key": "Dm",
"duration": 3.84
},
{
"rank": 3,
"file_name": "Bass_Loop_05_Cm_125.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Bass_Loop_05_Cm_125.wav",
"score": 0.818811,
"cosine": 0.695605,
"segment_score": 0.883218,
"catalog_score": 0.617711,
"tempo": 63.024,
"key": "C",
"duration": 7.68
}
]
},
"vocal_loop": {
"total_available": 24,
"top_candidates": [
{
"rank": 1,
"file_name": "MT Vocal Loop 12 125.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Vocal Loop 12 125.wav",
"score": 0.932334,
"cosine": 0.827361,
"segment_score": 0.923902,
"catalog_score": 0.999437,
"tempo": 123.047,
"key": "D#",
"duration": 1.92
},
{
"rank": 2,
"file_name": "MT Vocal Loop 11 125.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Vocal Loop 11 125.wav",
"score": 0.921701,
"cosine": 0.832834,
"segment_score": 0.920162,
"catalog_score": 0.948909,
"tempo": 123.047,
"key": "D#m",
"duration": 1.92
},
{
"rank": 3,
"file_name": "MT Vocal Loop 02 128.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Vocal Loop 02 128.wav",
"score": 0.862394,
"cosine": 0.845787,
"segment_score": 0.954025,
"catalog_score": 0.882953,
"tempo": 123.047,
"key": "G#m",
"duration": 3.75
}
]
},
"top_loop": {
"total_available": 144,
"top_candidates": [
{
"rank": 1,
"file_name": "Top_Loop_11_Any_125.wav",
"path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Top_Loop_11_Any_125.wav",
"score": 0.906089,
"cosine": 0.752537,
"segment_score": 0.768995,
"catalog_score": 0.859437,
"tempo": 123.047,
"key": "Cm",
"duration": 7.68
},
{
"rank": 2,
"file_name": "drum_loop_21_am_125.wav",
"path": "c:\\programdata\\ableton\\live 12 suite\\resources\\midi remote scripts\\librerias\\all_tracks\\drum_loop_21_am_125.wav",
"score": 0.893566,
"cosine": 0.813975,
"segment_score": 0.954219,
"catalog_score": 0.799711,
"tempo": 123.047,
"key": "A#m",
"duration": 7.68
},
{
"rank": 3,
"file_name": "drum_loop_23_am_125.wav",
"path": "c:\\programdata\\ableton\\live 12 suite\\resources\\midi remote scripts\\librerias\\all_tracks\\drum_loop_23_am_125.wav",
"score": 0.887869,
"cosine": 0.822104,
"segment_score": 0.94301,
"catalog_score": 0.799711,
"tempo": 123.047,
"key": "A#m",
"duration": 7.68
}
]
}
}
}
],
"contamination_analysis": {
"cross_role_files": [],
"potential_mismatches": [],
"role_score_stats": {
"kick": {
"min": 0.6423,
"max": 0.6582,
"avg": 0.6502,
"count": 3
},
"snare": {
"min": 0.621,
"max": 0.6425,
"avg": 0.6288,
"count": 3
},
"hat": {
"min": 0.5581,
"max": 0.6024,
"avg": 0.5844,
"count": 3
},
"bass_loop": {
"min": 0.8188,
"max": 0.8775,
"avg": 0.8407,
"count": 3
},
"vocal_loop": {
"min": 0.8624,
"max": 0.9323,
"avg": 0.9055,
"count": 3
},
"top_loop": {
"min": 0.8879,
"max": 0.9061,
"avg": 0.8958,
"count": 3
}
}
}
}

View File

@@ -1,157 +0,0 @@
param(
[Parameter(Mandatory = $true)]
[string]$TaskFile,
[Parameter(Mandatory = $true)]
[string]$ReportFile,
[string]$ProjectRoot = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path,
[string]$GlmModel = "glm-5",
[string]$GlmBaseUrl = $(if ($env:ANTHROPIC_BASE_URL) { $env:ANTHROPIC_BASE_URL } else { "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic" }),
[string]$GlmAuthToken = $env:ANTHROPIC_AUTH_TOKEN,
[string]$GlmAgentsFile = "",
[string]$CodexModel = "",
[string]$TelegramBotToken = $env:TELEGRAM_BOT_TOKEN,
[string]$TelegramChatId = $env:TELEGRAM_CHAT_ID,
[string]$TelegramConfigPath = (Join-Path $PSScriptRoot "telegram.local.json"),
[switch]$SkipCodexReview
)
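# Example invocation (paths illustrative):
#   .\run_glm_codex_loop.ps1 -TaskFile automation\tasks\task.md -ReportFile automation\reports\report.md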
$ErrorActionPreference = "Stop"
function Require-Command([string]$Name) {
if (-not (Get-Command $Name -ErrorAction SilentlyContinue)) {
throw "Command not found: $Name"
}
}
function Resolve-RepoPath([string]$BasePath, [string]$TargetPath) {
if ([System.IO.Path]::IsPathRooted($TargetPath)) {
return [System.IO.Path]::GetFullPath($TargetPath)
}
return [System.IO.Path]::GetFullPath((Join-Path $BasePath $TargetPath))
}
function Resolve-TelegramSettings() {
if (([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) -and (Test-Path -LiteralPath $TelegramConfigPath)) {
$config = Get-Content -LiteralPath $TelegramConfigPath -Raw | ConvertFrom-Json
if ([string]::IsNullOrWhiteSpace($TelegramBotToken)) {
$script:TelegramBotToken = $config.bot_token
}
if ([string]::IsNullOrWhiteSpace($TelegramChatId)) {
$script:TelegramChatId = $config.chat_id
}
}
}
function Send-LoopNotification([string]$Message) {
Resolve-TelegramSettings
if ([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) {
return
}
$notifier = Join-Path $PSScriptRoot "send_telegram_notification.ps1"
try {
& $notifier -Message $Message -BotToken $TelegramBotToken -ChatId $TelegramChatId -ConfigPath $TelegramConfigPath
}
catch {
Write-Warning ("Telegram notification failed: " + $_.Exception.Message)
}
}
function Resolve-CodexCommand() {
$cmd = Get-Command "codex.cmd" -ErrorAction SilentlyContinue
if ($cmd) {
return $cmd.Source
}
$fallback = Get-Command "codex" -ErrorAction SilentlyContinue
if ($fallback) {
return $fallback.Source
}
throw "Command not found: codex"
}
$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path
$taskPath = (Resolve-Path -LiteralPath $TaskFile).Path
$reportPath = Resolve-RepoPath $projectPath $ReportFile
$codexCommand = Resolve-CodexCommand
$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
$runDir = Join-Path $projectPath ("automation\\runs\\loop_" + $timestamp)
New-Item -ItemType Directory -Force -Path $runDir | Out-Null
$codexStdoutPath = Join-Path $runDir "codex_stdout.txt"
$codexMessagePath = Join-Path $runDir "codex_last_message.txt"
$glmRunner = Join-Path $PSScriptRoot "run_glm_cycle.ps1"
Send-LoopNotification("GLM/Codex loop started: $(Split-Path -Leaf $taskPath)")
& $glmRunner `
-TaskFile $taskPath `
-ReportFile $reportPath `
-ProjectRoot $projectPath `
-Model $GlmModel `
-BaseUrl $GlmBaseUrl `
-AuthToken $GlmAuthToken `
-AgentsFile $GlmAgentsFile `
-TelegramBotToken $TelegramBotToken `
-TelegramChatId $TelegramChatId `
-TelegramConfigPath $TelegramConfigPath
if ($SkipCodexReview) {
Send-LoopNotification("GLM/Codex loop finished without Codex review: $(Split-Path -Leaf $taskPath)")
Write-Host "GLM worker finished. Codex review skipped by flag."
return
}
$reviewPrompt = @"
Read this worker task file:
$taskPath
Read this GLM report:
$reportPath
Your job:
1. Inspect the real diff in the repository.
2. Verify whether GLM actually implemented what the report claims.
3. Fix anything incorrect, incomplete, or unsafe.
4. Run the relevant validations mentioned by the task/report.
5. Leave the repository in the best corrected state you can reach in one pass.
6. Write a concise final summary to the output file configured by the CLI.
Be strict about overclaims. The code is the source of truth, not the report.
"@
$codexArgs = @(
"exec",
"--dangerously-bypass-approvals-and-sandbox",
"-C", $projectPath,
"-o", $codexMessagePath
)
if (-not [string]::IsNullOrWhiteSpace($CodexModel)) {
$codexArgs += @("-m", $CodexModel)
}
$codexArgs += $reviewPrompt
Write-Host ""
Write-Host "Running Codex review/correction pass..."
Send-LoopNotification("Codex review started: $(Split-Path -Leaf $taskPath)")
try {
& $codexCommand @codexArgs 2>&1 | Tee-Object -FilePath $codexStdoutPath
}
catch {
Send-LoopNotification("Codex review failed: $(Split-Path -Leaf $taskPath)`n$($_.Exception.Message)")
throw
}
Send-LoopNotification("GLM/Codex loop finished: $(Split-Path -Leaf $taskPath)`nReport: $(Split-Path -Leaf $reportPath)`nCodex note: $(Split-Path -Leaf $codexMessagePath)")
Write-Host ""
Write-Host "Loop finished."
Write-Host "Task: $taskPath"
Write-Host "GLM report: $reportPath"
Write-Host "Codex note: $codexMessagePath"
Write-Host "Codex stdout:$codexStdoutPath"

View File

@@ -1,162 +0,0 @@
param(
[Parameter(Mandatory = $true)]
[string]$TaskFile,
[Parameter(Mandatory = $true)]
[string]$ReportFile,
[string]$ProjectRoot = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path,
[string]$Model = "glm-5",
[string]$BaseUrl = $(if ($env:ANTHROPIC_BASE_URL) { $env:ANTHROPIC_BASE_URL } else { "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic" }),
[string]$AuthToken = $env:ANTHROPIC_AUTH_TOKEN,
[string]$AgentsFile = (Join-Path $PSScriptRoot "glm_agents.team.json"),
[string]$TelegramBotToken = $env:TELEGRAM_BOT_TOKEN,
[string]$TelegramChatId = $env:TELEGRAM_CHAT_ID,
[string]$TelegramConfigPath = (Join-Path $PSScriptRoot "telegram.local.json"),
[switch]$VerboseLogs
)
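# Example invocation (paths illustrative):
#   .\run_glm_cycle.ps1 -TaskFile automation\tasks\task.md -ReportFile automation\reports\report.md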
$ErrorActionPreference = "Stop"
function Require-Command([string]$Name) {
if (-not (Get-Command $Name -ErrorAction SilentlyContinue)) {
throw "Command not found: $Name"
}
}
function Require-File([string]$PathValue, [string]$Label) {
if (-not (Test-Path -LiteralPath $PathValue)) {
throw "$Label not found: $PathValue"
}
}
function Resolve-RepoPath([string]$BasePath, [string]$TargetPath) {
if ([System.IO.Path]::IsPathRooted($TargetPath)) {
return [System.IO.Path]::GetFullPath($TargetPath)
}
return [System.IO.Path]::GetFullPath((Join-Path $BasePath $TargetPath))
}
function Resolve-TelegramSettings() {
if (([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) -and (Test-Path -LiteralPath $TelegramConfigPath)) {
$config = Get-Content -LiteralPath $TelegramConfigPath -Raw | ConvertFrom-Json
if ([string]::IsNullOrWhiteSpace($TelegramBotToken)) {
$script:TelegramBotToken = $config.bot_token
}
if ([string]::IsNullOrWhiteSpace($TelegramChatId)) {
$script:TelegramChatId = $config.chat_id
}
}
}
function Send-RunNotification([string]$Message) {
Resolve-TelegramSettings
if ([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) {
return
}
$notifier = Join-Path $PSScriptRoot "send_telegram_notification.ps1"
try {
& $notifier -Message $Message -BotToken $TelegramBotToken -ChatId $TelegramChatId -ConfigPath $TelegramConfigPath
}
catch {
Write-Warning ("Telegram notification failed: " + $_.Exception.Message)
}
}
Require-Command "claude"
Require-File $TaskFile "Task file"
if ([string]::IsNullOrWhiteSpace($BaseUrl)) {
throw "ANTHROPIC_BASE_URL is not set. Pass -BaseUrl or export the env var first."
}
if ([string]::IsNullOrWhiteSpace($AuthToken)) {
throw "ANTHROPIC_AUTH_TOKEN is not set. Pass -AuthToken or export the env var first."
}
$env:ANTHROPIC_BASE_URL = $BaseUrl
$env:ANTHROPIC_AUTH_TOKEN = $AuthToken
$env:CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC = "1"
$env:ANTHROPIC_MODEL = $Model
$env:ANTHROPIC_SMALL_FAST_MODEL = $Model
$env:ANTHROPIC_DEFAULT_HAIKU_MODEL = $Model
$env:ANTHROPIC_DEFAULT_SONNET_MODEL = $Model
$env:ANTHROPIC_DEFAULT_OPUS_MODEL = $Model
$taskPath = (Resolve-Path -LiteralPath $TaskFile).Path
$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path
$reportPath = Resolve-RepoPath $projectPath $ReportFile
$reportDir = Split-Path -Parent $reportPath
New-Item -ItemType Directory -Force -Path $reportDir | Out-Null
$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
$runDir = Join-Path $projectPath ("automation\\runs\\glm_" + $timestamp)
New-Item -ItemType Directory -Force -Path $runDir | Out-Null
$stdoutPath = Join-Path $runDir "glm_stdout.txt"
$prompt = @"
You are running as the GLM worker on this Windows repository.
Repository root:
$projectPath
Task file to follow exactly:
$taskPath
You must:
1. Read the task markdown and implement the requested changes in the repository.
2. Run the validations requested by the task.
3. Create or overwrite this report file with a truthful report:
$reportPath
4. Do not overclaim. If something is incomplete, say so explicitly in the report.
5. Keep the diff focused.
6. If custom agents are available, use them aggressively and in parallel where safe:
- planner first
- implementer_core and implementer_aux for disjoint work
- validator before finishing
- retrieval_reviewer or runtime_guard when relevant
- reporter last
Open and follow the task markdown from disk instead of asking for the task again.
"@
$claudeArgs = @(
"-p",
"--dangerously-skip-permissions",
"--effort", "max",
"--model", $Model,
"--add-dir", $projectPath
)
if (-not [string]::IsNullOrWhiteSpace($AgentsFile)) {
$agentsPath = (Resolve-Path -LiteralPath $AgentsFile).Path
$claudeArgs += @("--agents", (Get-Content -LiteralPath $agentsPath -Raw))
}
if ($VerboseLogs) {
$claudeArgs += "--verbose"
}
Write-Host "Running GLM worker with model $Model..."
Send-RunNotification("GLM worker started: $(Split-Path -Leaf $taskPath)")
try {
$prompt | & claude @claudeArgs 2>&1 | Tee-Object -FilePath $stdoutPath
}
catch {
Send-RunNotification("GLM worker failed: $(Split-Path -Leaf $taskPath)`n$($_.Exception.Message)")
throw
}
if (-not (Test-Path -LiteralPath $reportPath)) {
Send-RunNotification("GLM worker failed: missing report for $(Split-Path -Leaf $taskPath)")
throw "GLM finished but did not create the expected report file: $reportPath"
}
Send-RunNotification("GLM worker finished: $(Split-Path -Leaf $taskPath)`nReport: $(Split-Path -Leaf $reportPath)")
Write-Host ""
Write-Host "GLM cycle finished."
Write-Host "Task: $taskPath"
Write-Host "Report: $reportPath"
Write-Host "Stdout: $stdoutPath"

View File

@@ -1,141 +0,0 @@
param(
[string]$QueueFile = (Join-Path $PSScriptRoot "task_queue.json"),
[string]$ProjectRoot = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path,
[string]$GlmModel = "glm-5",
[string]$GlmBaseUrl = $(if ($env:ANTHROPIC_BASE_URL) { $env:ANTHROPIC_BASE_URL } else { "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic" }),
[string]$GlmAuthToken = $env:ANTHROPIC_AUTH_TOKEN,
[string]$GlmAgentsFile = (Join-Path $PSScriptRoot "glm_agents.team.json"),
[string]$CodexModel = "",
[string]$TelegramBotToken = $env:TELEGRAM_BOT_TOKEN,
[string]$TelegramChatId = $env:TELEGRAM_CHAT_ID,
[string]$TelegramConfigPath = (Join-Path $PSScriptRoot "telegram.local.json"),
[int]$PollSeconds = 30,
[switch]$Watch,
[switch]$ContinueOnError
)
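# Expected task_queue.json shape (illustrative; fields inferred from the code below):
#   { "tasks": [ { "id": "t1", "title": "...", "task_file": "...", "report_file": "...",
#                  "enabled": true, "status": "pending" } ] }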
$ErrorActionPreference = "Stop"
function Resolve-RepoPath([string]$BasePath, [string]$TargetPath) {
if ([System.IO.Path]::IsPathRooted($TargetPath)) {
return [System.IO.Path]::GetFullPath($TargetPath)
}
return [System.IO.Path]::GetFullPath((Join-Path $BasePath $TargetPath))
}
function Load-Queue([string]$PathValue) {
return Get-Content -LiteralPath $PathValue -Raw | ConvertFrom-Json -Depth 20
}
function Save-Queue([string]$PathValue, $QueueObject) {
$QueueObject | ConvertTo-Json -Depth 20 | Set-Content -LiteralPath $PathValue -Encoding UTF8
}
function Resolve-TelegramSettings() {
if (([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) -and (Test-Path -LiteralPath $TelegramConfigPath)) {
$config = Get-Content -LiteralPath $TelegramConfigPath -Raw | ConvertFrom-Json
if ([string]::IsNullOrWhiteSpace($TelegramBotToken)) {
$script:TelegramBotToken = $config.bot_token
}
if ([string]::IsNullOrWhiteSpace($TelegramChatId)) {
$script:TelegramChatId = $config.chat_id
}
}
}
function Send-QueueNotification([string]$Message) {
Resolve-TelegramSettings
if ([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) {
return
}
$notifier = Join-Path $PSScriptRoot "send_telegram_notification.ps1"
try {
& $notifier -Message $Message -BotToken $TelegramBotToken -ChatId $TelegramChatId -ConfigPath $TelegramConfigPath
}
catch {
Write-Warning ("Telegram notification failed: " + $_.Exception.Message)
}
}
function Find-NextTask($QueueObject) {
foreach ($task in $QueueObject.tasks) {
if ($task.enabled -and $task.status -eq "pending") {
return $task
}
}
return $null
}
$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path
$queuePath = Resolve-RepoPath $projectPath $QueueFile
$loopRunner = Join-Path $PSScriptRoot "run_glm_codex_loop.ps1"
$historyDir = Join-Path $projectPath "automation\\runs\\queue"
New-Item -ItemType Directory -Force -Path $historyDir | Out-Null
Send-QueueNotification("AbletonMCP_AI queue runner started on $(Get-Date -Format 'yyyy-MM-dd HH:mm:ss'). Watching=$Watch ContinueOnError=$ContinueOnError")
do {
$queue = Load-Queue $queuePath
$task = Find-NextTask $queue
if ($null -eq $task) {
if ($Watch) {
Start-Sleep -Seconds $PollSeconds
continue
}
break
}
$taskPath = Resolve-RepoPath $projectPath $task.task_file
$reportPath = Resolve-RepoPath $projectPath $task.report_file
$task.status = "running"
$task.started_at = (Get-Date).ToString("s")
Save-Queue $queuePath $queue
Send-QueueNotification("Queue task started: [$($task.id)] $($task.title)")
try {
& $loopRunner `
-TaskFile $taskPath `
-ReportFile $reportPath `
-ProjectRoot $projectPath `
-GlmModel $GlmModel `
-GlmBaseUrl $GlmBaseUrl `
-GlmAuthToken $GlmAuthToken `
-GlmAgentsFile $GlmAgentsFile `
-CodexModel $CodexModel `
-TelegramBotToken $TelegramBotToken `
-TelegramChatId $TelegramChatId `
-TelegramConfigPath $TelegramConfigPath
$queue = Load-Queue $queuePath
foreach ($item in $queue.tasks) {
if ($item.id -eq $task.id) {
$item.status = "completed"
$item.completed_at = (Get-Date).ToString("s")
break
}
}
Save-Queue $queuePath $queue
Send-QueueNotification("Queue task completed: [$($task.id)] $($task.title)")
}
catch {
$queue = Load-Queue $queuePath
foreach ($item in $queue.tasks) {
if ($item.id -eq $task.id) {
$item.status = "failed"
$item.failed_at = (Get-Date).ToString("s")
$item.error = $_.Exception.Message
break
}
}
Save-Queue $queuePath $queue
Send-QueueNotification("Queue task failed: [$($task.id)] $($task.title)`n$($_.Exception.Message)")
if (-not $ContinueOnError) {
throw
}
}
}
while ($true)

View File

@@ -1,33 +0,0 @@
param(
[Parameter(Mandatory = $true)]
[string]$Message,
[string]$BotToken = $env:TELEGRAM_BOT_TOKEN,
[string]$ChatId = $env:TELEGRAM_CHAT_ID,
[string]$ConfigPath = (Join-Path $PSScriptRoot "telegram.local.json")
)
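# Example invocation (illustrative):
#   .\send_telegram_notification.ps1 -Message "Queue task completed"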
$ErrorActionPreference = "Stop"
if (([string]::IsNullOrWhiteSpace($BotToken) -or [string]::IsNullOrWhiteSpace($ChatId)) -and (Test-Path -LiteralPath $ConfigPath)) {
$config = Get-Content -LiteralPath $ConfigPath -Raw | ConvertFrom-Json
if ([string]::IsNullOrWhiteSpace($BotToken)) {
$BotToken = $config.bot_token
}
if ([string]::IsNullOrWhiteSpace($ChatId)) {
$ChatId = $config.chat_id
}
}
if ([string]::IsNullOrWhiteSpace($BotToken) -or [string]::IsNullOrWhiteSpace($ChatId)) {
exit 0
}
$uri = "https://api.telegram.org/bot$BotToken/sendMessage"
$body = @{
chat_id = $ChatId
text = $Message
disable_web_page_preview = $true
}
Invoke-RestMethod -Uri $uri -Method Post -Body $body | Out-Null

View File

@@ -1,18 +0,0 @@
[Unit]
Description=AbletonMCP_AI autonomous GLM/Codex queue
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=ren
WorkingDirectory=/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI
Environment="LOCAL_ENV_FILE=/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl.local.env"
ExecStart=/bin/bash "/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl/run_task_queue.sh"
Restart=always
RestartSec=15
StandardOutput=append:/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl_runtime/logs/service.log
StandardError=append:/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl_runtime/logs/service.log
[Install]
WantedBy=multi-user.target
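# Manual install (mirrors the companion install script later in this diff):
#   sudo cp ableton-glm-loop.service /etc/systemd/system/
#   sudo systemctl daemon-reload && sudo systemctl enable ableton-glm-loop.service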

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
RUNTIME_DIR="$PROJECT_ROOT/automation/wsl_runtime"
CODEX_HOME_DIR="$RUNTIME_DIR/codex_home"
WINDOWS_CODEX_HOME="/mnt/c/Users/ren/.codex"
ENV_FILE="$PROJECT_ROOT/automation/wsl.local.env"
OPENAI_API_KEY_VALUE=""
mkdir -p "$CODEX_HOME_DIR" "$RUNTIME_DIR/logs"
if [[ -f "$WINDOWS_CODEX_HOME/auth.json" && ! -f "$CODEX_HOME_DIR/auth.json" ]]; then
cp "$WINDOWS_CODEX_HOME/auth.json" "$CODEX_HOME_DIR/auth.json"
fi
if [[ -f "$CODEX_HOME_DIR/auth.json" ]]; then
OPENAI_API_KEY_VALUE="$(jq -r '.OPENAI_API_KEY // empty' "$CODEX_HOME_DIR/auth.json" 2>/dev/null || true)"
fi
cat > "$CODEX_HOME_DIR/config.toml" <<'EOF'
model = "gpt-5.4"
[sandbox_workspace_write]
network_access = true
EOF
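# NOTE: the env file written below embeds credentials in plain text; the
# chmod 600 at the end of this script is the only protection applied.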
cat > "$ENV_FILE" <<EOF
export ANTHROPIC_BASE_URL='https://coding-intl.dashscope.aliyuncs.com/apps/anthropic'
export ANTHROPIC_AUTH_TOKEN='sk-sp-e87cea7b587c4af09e465726b084f41b'
export GLM_MODEL='glm-5'
export CODEX_MODEL='gpt-5.4'
export TELEGRAM_BOT_TOKEN='8444660361:AAECCo6oon0dbnQMzgaanZntYFOLgcZrcJ4'
export TELEGRAM_CHAT_ID='692714536'
export CODEX_HOME='/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl_runtime/codex_home'
export GLM_AGENTS_FILE='/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/glm_agents.team.json'
export POLL_SECONDS='30'
export WATCH='1'
export CONTINUE_ON_ERROR='1'
EOF
if [[ -n "$OPENAI_API_KEY_VALUE" ]]; then
printf "export OPENAI_API_KEY='%s'\n" "$OPENAI_API_KEY_VALUE" >> "$ENV_FILE"
fi
chmod 600 "$ENV_FILE" "$CODEX_HOME_DIR/auth.json" 2>/dev/null || true
chmod +x "$SCRIPT_DIR/"*.sh
echo "WSL runtime bootstrapped"
echo "Runtime dir: $RUNTIME_DIR"
echo "Env file: $ENV_FILE"
echo "Codex home: $CODEX_HOME_DIR"

View File

@@ -1,163 +0,0 @@
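# Local automation stack: Postgres, Redis, Gitea, and n8n on one bridge network.
# Typical bring-up (illustrative): docker compose up -d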
services:
postgres:
image: postgres:16-alpine
container_name: abletonmcp-postgres
restart: unless-stopped
environment:
POSTGRES_USER: ${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
POSTGRES_DB: ${POSTGRES_BOOTSTRAP_DB:-postgres}
PGDATA: /var/lib/postgresql/data/pgdata
GITEA_DB_NAME: ${GITEA_DB_NAME:-gitea}
N8N_DB_NAME: ${N8N_DB_NAME:-n8n}
volumes:
- postgres-data:/var/lib/postgresql/data
- ./initdb:/docker-entrypoint-initdb.d:ro
ports:
- "${POSTGRES_PORT:-5432}:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_BOOTSTRAP_DB:-postgres}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 20s
networks:
- internal
redis:
image: redis:7-alpine
container_name: abletonmcp-redis
restart: unless-stopped
command:
- redis-server
- --requirepass
- ${REDIS_PASSWORD:-changeme}
- --appendonly
- "yes"
- --save
- "60"
- "1000"
volumes:
- redis-data:/data
ports:
- "${REDIS_PORT:-6379}:6379"
healthcheck:
test: ["CMD-SHELL", "redis-cli -a ${REDIS_PASSWORD:-changeme} ping | grep -q PONG"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
networks:
- internal
gitea:
image: gitea/gitea:1.21-rootless
container_name: abletonmcp-gitea
restart: unless-stopped
environment:
USER_UID: 1000
USER_GID: 1000
GITEA__database__DB_TYPE: postgres
GITEA__database__HOST: postgres:5432
GITEA__database__NAME: ${GITEA_DB_NAME:-gitea}
GITEA__database__USER: ${POSTGRES_USER:-postgres}
GITEA__database__PASSWD: ${POSTGRES_PASSWORD:-changeme}
GITEA__server__DOMAIN: ${GITEA_DOMAIN:-localhost}
GITEA__server__ROOT_URL: ${GITEA_ROOT_URL:-http://localhost:3000}
GITEA__server__HTTP_PORT: 3000
GITEA__server__SSH_DOMAIN: ${GITEA_SSH_DOMAIN:-localhost}
GITEA__server__SSH_PORT: ${GITEA_SSH_PORT:-222}
GITEA__server__START_SSH_SERVER: "true"
GITEA__server__SSH_LISTEN_PORT: 222
GITEA__security__INSTALL_LOCK: ${GITEA_SECURITY_INSTALL_LOCK:-true}
GITEA__service__DISABLE_REGISTRATION: "true"
GITEA__server__OFFLINE_MODE: ${GITEA_OFFLINE_MODE:-true}
volumes:
- gitea-data:/var/lib/gitea
- gitea-config:/etc/gitea
- gitea-logs:/var/log/gitea
ports:
- "${GITEA_HTTP_PORT:-3000}:3000"
- "${GITEA_SSH_PORT:-222}:222"
healthcheck:
test: ["CMD-SHELL", "wget -q --spider http://localhost:3000/api/healthz || exit 1"]
interval: 15s
timeout: 5s
retries: 10
start_period: 45s
depends_on:
postgres:
condition: service_healthy
networks:
- internal
n8n:
image: n8nio/n8n:latest
container_name: abletonmcp-n8n
restart: unless-stopped
environment:
DB_TYPE: postgresdb
DB_POSTGRESDB_HOST: postgres
DB_POSTGRESDB_PORT: 5432
DB_POSTGRESDB_DATABASE: ${N8N_DB_NAME:-n8n}
DB_POSTGRESDB_USER: ${POSTGRES_USER:-postgres}
DB_POSTGRESDB_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
N8N_PORT: 5678
N8N_PROTOCOL: http
N8N_HOST: ${N8N_HOST:-localhost}
N8N_PATH: ${N8N_PATH:-/}
N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY:-changeme-change-this}
N8N_LOG_LEVEL: ${N8N_LOG_LEVEL:-info}
N8N_EXECUTIONS_MODE: ${N8N_EXECUTIONS_MODE:-regular}
N8N_BASIC_AUTH_ACTIVE: ${N8N_BASIC_AUTH_ACTIVE:-true}
N8N_BASIC_AUTH_USER: ${N8N_BASIC_AUTH_USER:-admin}
N8N_BASIC_AUTH_PASSWORD: ${N8N_BASIC_AUTH_PASSWORD:-changeme}
N8N_COOKIE_POLICY: ${N8N_COOKIE_POLICY:-lax}
N8N_HOST_ALLOW_LIST: ${N8N_HOST_ALLOW_LIST:-localhost,127.0.0.1}
N8N_WEBHOOK_URL: ${N8N_WEBHOOK_URL:-http://localhost:5678/}
N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL:-http://localhost:5678}
GENERIC_TIMEZONE: ${TZ:-UTC}
TZ: ${TZ:-UTC}
N8N_DIAGNOSTICS_ENABLED: ${N8N_DIAGNOSTICS_ENABLED:-false}
N8N_VERSION_NOTIFICATIONS_ENABLED: ${N8N_VERSION_NOTIFICATIONS_ENABLED:-false}
volumes:
- n8n-data:/home/node/.n8n
- n8n-logs:/home/node/.npm/_logs
- ${PROJECT_PATH:-/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI}:/project:rw
- ${PROJECT_PATH:-/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI}/automation/workflows:/workflows:ro
ports:
- "${N8N_PORT:-5678}:5678"
healthcheck:
test: ["CMD-SHELL", "wget -q --spider http://localhost:5678/healthz || exit 1"]
interval: 15s
timeout: 5s
retries: 10
start_period: 45s
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- internal
networks:
internal:
name: abletonmcp-network
driver: bridge
volumes:
postgres-data:
name: abletonmcp-postgres-data
gitea-data:
name: abletonmcp-gitea-data
gitea-config:
name: abletonmcp-gitea-config
gitea-logs:
name: abletonmcp-gitea-logs
redis-data:
name: abletonmcp-redis-data
n8n-data:
name: abletonmcp-n8n-data
n8n-logs:
name: abletonmcp-n8n-logs

View File

@@ -1,18 +0,0 @@
#!/bin/sh
set -eu
create_db() {
db_name="$1"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "${POSTGRES_BOOTSTRAP_DB:-postgres}" <<-EOSQL
SELECT 'CREATE DATABASE "${db_name}"'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '${db_name}')\gexec
EOSQL
}
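# \gexec executes each row returned by the SELECT as a SQL statement, so
# CREATE DATABASE only runs when the database does not already exist.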
if [ -n "${GITEA_DB_NAME:-}" ]; then
create_db "$GITEA_DB_NAME"
fi
if [ -n "${N8N_DB_NAME:-}" ]; then
create_db "$N8N_DB_NAME"
fi

View File

@@ -1,12 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVICE_SRC="$SCRIPT_DIR/ableton-glm-loop.service"
SERVICE_DST="/etc/systemd/system/ableton-glm-loop.service"
sudo cp "$SERVICE_SRC" "$SERVICE_DST"
sudo systemctl daemon-reload
sudo systemctl enable ableton-glm-loop.service
sudo systemctl restart ableton-glm-loop.service
sudo systemctl status --no-pager ableton-glm-loop.service || true

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}"
if [[ -f "$LOCAL_ENV_FILE" ]]; then
# shellcheck disable=SC1090
source "$LOCAL_ENV_FILE"
fi
TASK_FILE="${1:?task file is required}"
REPORT_FILE="${2:?report file is required}"
GLM_MODEL="${GLM_MODEL:-glm-5}"
CODEX_MODEL="${CODEX_MODEL:-gpt-5.4}"
SKIP_CODEX_REVIEW="${SKIP_CODEX_REVIEW:-0}"
CODEX_HOME="${CODEX_HOME:-$PROJECT_ROOT/automation/wsl_runtime/codex_home}"
export CODEX_HOME
if [[ -n "${OPENAI_API_KEY:-}" ]]; then
export OPENAI_API_KEY
fi
RUN_DIR="$PROJECT_ROOT/automation/runs/loop_$(date +%Y%m%d_%H%M%S)"
CODEX_STDOUT_PATH="$RUN_DIR/codex_stdout.txt"
CODEX_MESSAGE_PATH="$RUN_DIR/codex_last_message.txt"
mkdir -p "$RUN_DIR"
notify() {
"$SCRIPT_DIR/send_telegram.sh" "$1" || true
}
notify "GLM/Codex loop started: $(basename "$TASK_FILE")"
"$SCRIPT_DIR/run_glm_cycle.sh" "$TASK_FILE" "$REPORT_FILE"
if [[ "$SKIP_CODEX_REVIEW" == "1" ]]; then
notify "GLM/Codex loop finished without Codex review: $(basename "$TASK_FILE")"
exit 0
fi
notify "Codex review started: $(basename "$TASK_FILE")"
WIN_TASK_FILE="$(wslpath -w "$TASK_FILE")"
WIN_REPORT_FILE="$(wslpath -w "$REPORT_FILE")"
WIN_PROJECT_ROOT="$(wslpath -w "$PROJECT_ROOT")"
WIN_CODEX_MESSAGE_PATH="$(wslpath -w "$CODEX_MESSAGE_PATH")"
WIN_REVIEW_SCRIPT="$(wslpath -w "$PROJECT_ROOT/automation/invoke_codex_review.ps1")"
if ! /mnt/c/Windows/System32/WindowsPowerShell/v1.0/powershell.exe -NoProfile -ExecutionPolicy Bypass -File "$WIN_REVIEW_SCRIPT" -TaskFile "$WIN_TASK_FILE" -ReportFile "$WIN_REPORT_FILE" -ProjectRoot "$WIN_PROJECT_ROOT" -OutputFile "$WIN_CODEX_MESSAGE_PATH" -CodexModel "$CODEX_MODEL" 2>&1 | tee "$CODEX_STDOUT_PATH"; then
notify "Codex review failed: $(basename "$TASK_FILE")"
exit 1
fi
notify "GLM/Codex loop finished: $(basename "$TASK_FILE")"
echo "Loop finished"
echo "Task: $TASK_FILE"
echo "GLM report: $REPORT_FILE"
echo "Codex note: $CODEX_MESSAGE_PATH"
echo "Codex stdout:$CODEX_STDOUT_PATH"

View File

@@ -1,92 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}"
if [[ -f "$LOCAL_ENV_FILE" ]]; then
# shellcheck disable=SC1090
source "$LOCAL_ENV_FILE"
fi
TASK_FILE="${1:?task file is required}"
REPORT_FILE="${2:?report file is required}"
GLM_MODEL="${GLM_MODEL:-glm-5}"
GLM_AGENTS_FILE="${GLM_AGENTS_FILE:-$PROJECT_ROOT/automation/glm_agents.team.json}"
export ANTHROPIC_BASE_URL="${ANTHROPIC_BASE_URL:-https://coding-intl.dashscope.aliyuncs.com/apps/anthropic}"
export ANTHROPIC_AUTH_TOKEN="${ANTHROPIC_AUTH_TOKEN:?ANTHROPIC_AUTH_TOKEN is required}"
export CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC="1"
export ANTHROPIC_MODEL="$GLM_MODEL"
export ANTHROPIC_SMALL_FAST_MODEL="$GLM_MODEL"
export ANTHROPIC_DEFAULT_HAIKU_MODEL="$GLM_MODEL"
export ANTHROPIC_DEFAULT_SONNET_MODEL="$GLM_MODEL"
export ANTHROPIC_DEFAULT_OPUS_MODEL="$GLM_MODEL"
RUN_DIR="$PROJECT_ROOT/automation/runs/glm_$(date +%Y%m%d_%H%M%S)"
STDOUT_PATH="$RUN_DIR/glm_stdout.txt"
mkdir -p "$RUN_DIR" "$(dirname "$REPORT_FILE")"
notify() {
"$SCRIPT_DIR/send_telegram.sh" "$1" || true
}
PROMPT=$(cat <<EOF
You are running as the GLM worker on this Linux repository.
Repository root:
$PROJECT_ROOT
Task file to follow exactly:
$TASK_FILE
You must:
1. Read the task markdown and implement the requested changes in the repository.
2. Run the validations requested by the task.
3. Create or overwrite this report file with a truthful report:
$REPORT_FILE
4. Do not overclaim. If something is incomplete, say so explicitly in the report.
5. Keep the diff focused.
6. If custom agents are available, use them aggressively and in parallel where safe:
- planner first
- implementer_core and implementer_aux for disjoint work
- validator before finishing
- retrieval_reviewer or runtime_guard when relevant
- reporter last
Open and follow the task markdown from disk instead of asking for the task again.
EOF
)
ARGS=(
-p
--dangerously-skip-permissions
--effort max
--model "$GLM_MODEL"
--add-dir "$PROJECT_ROOT"
)
if [[ -f "$GLM_AGENTS_FILE" ]]; then
AGENTS_JSON="$(cat "$GLM_AGENTS_FILE")"
ARGS+=(--agents "$AGENTS_JSON")
fi
notify "GLM worker started: $(basename "$TASK_FILE")"
if ! printf '%s\n' "$PROMPT" | claude "${ARGS[@]}" 2>&1 | tee "$STDOUT_PATH"; then
notify "GLM worker failed: $(basename "$TASK_FILE")"
exit 1
fi
if [[ ! -f "$REPORT_FILE" ]]; then
notify "GLM worker failed: missing report for $(basename "$TASK_FILE")"
echo "missing report: $REPORT_FILE" >&2
exit 1
fi
notify "GLM worker finished: $(basename "$TASK_FILE")"
echo "GLM cycle finished"
echo "Task: $TASK_FILE"
echo "Report: $REPORT_FILE"
echo "Stdout: $STDOUT_PATH"

View File

@@ -1,106 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
QUEUE_FILE="${QUEUE_FILE:-$PROJECT_ROOT/automation/task_queue.json}"
LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}"
POLL_SECONDS="${POLL_SECONDS:-30}"
WATCH="${WATCH:-1}"
CONTINUE_ON_ERROR="${CONTINUE_ON_ERROR:-1}"
if [[ -f "$LOCAL_ENV_FILE" ]]; then
# shellcheck disable=SC1090
source "$LOCAL_ENV_FILE"
fi
notify() {
"$SCRIPT_DIR/send_telegram.sh" "$1" || true
}
queue_has_pending() {
jq -e '.tasks[] | select(.enabled == true and .status == "pending")' "$QUEUE_FILE" >/dev/null
}
read_next_task() {
jq -r '.tasks[] | select(.enabled == true and .status == "pending") | @base64' "$QUEUE_FILE" | head -n 1
}
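# Queue updates rewrite the JSON through a temp file plus mv (an atomic rename
# on the same filesystem), so a crash mid-update cannot truncate the queue.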
update_task_status() {
local task_id="$1"
local status="$2"
local field="$3"
local value="$4"
local tmp
local queue_dir
queue_dir="$(dirname "$QUEUE_FILE")"
tmp="$(mktemp "$queue_dir/.task_queue.tmp.XXXXXX")"
jq --arg id "$task_id" --arg status "$status" --arg field "$field" --arg value "$value" '
.tasks |= map(
if .id == $id then
.status = $status | .[$field] = $value
else
.
end
)' "$QUEUE_FILE" > "$tmp"
mv "$tmp" "$QUEUE_FILE"
}
set_task_error() {
local task_id="$1"
local message="$2"
local tmp
local queue_dir
queue_dir="$(dirname "$QUEUE_FILE")"
tmp="$(mktemp "$queue_dir/.task_queue.tmp.XXXXXX")"
jq --arg id "$task_id" --arg msg "$message" '
.tasks |= map(
if .id == $id then
.error = $msg
else
.
end
)' "$QUEUE_FILE" > "$tmp"
mv "$tmp" "$QUEUE_FILE"
}
notify "AbletonMCP_AI queue runner started on $(date '+%Y-%m-%d %H:%M:%S')"
while true; do
if ! queue_has_pending; then
if [[ "$WATCH" == "1" ]]; then
sleep "$POLL_SECONDS"
continue
fi
break
fi
task_b64="$(read_next_task)"
if [[ -z "$task_b64" ]]; then
sleep "$POLL_SECONDS"
continue
fi
task_json="$(printf '%s' "$task_b64" | base64 -d)"
task_id="$(printf '%s' "$task_json" | jq -r '.id')"
task_title="$(printf '%s' "$task_json" | jq -r '.title')"
task_file_rel="$(printf '%s' "$task_json" | jq -r '.task_file')"
report_file_rel="$(printf '%s' "$task_json" | jq -r '.report_file')"
task_file="$PROJECT_ROOT/${task_file_rel//\\//}"
report_file="$PROJECT_ROOT/${report_file_rel//\\//}"
update_task_status "$task_id" "running" "started_at" "$(date -Iseconds)"
notify "Queue task started: [$task_id] $task_title"
if "$SCRIPT_DIR/run_glm_codex_loop.sh" "$task_file" "$report_file"; then
update_task_status "$task_id" "completed" "completed_at" "$(date -Iseconds)"
notify "Queue task completed: [$task_id] $task_title"
else
update_task_status "$task_id" "failed" "failed_at" "$(date -Iseconds)"
set_task_error "$task_id" "task runner failed"
notify "Queue task failed: [$task_id] $task_title"
if [[ "$CONTINUE_ON_ERROR" != "1" ]]; then
exit 1
fi
fi
done

View File

@@ -1,281 +0,0 @@
#!/usr/bin/env bash
#
# install.sh - Install Docker, Docker Compose, and local Python runtime on Ubuntu 24.04 WSL2
# Idempotent: safe to run multiple times
#
set -euo pipefail
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$AUTOMATION_DIR/.." && pwd)"
RUNTIME_DIR="$AUTOMATION_DIR/wsl_runtime"
VENV_DIR="$RUNTIME_DIR/venv"
check_sudo() {
if [[ $EUID -eq 0 ]]; then
log_error "This script should not be run as root. It will use sudo when needed."
exit 1
fi
}
detect_ubuntu() {
if [[ ! -f /etc/os-release ]]; then
log_error "Cannot detect OS version. /etc/os-release not found."
exit 1
fi
# shellcheck disable=SC1091
source /etc/os-release
if [[ "${ID:-}" != "ubuntu" ]]; then
log_warn "This script is designed for Ubuntu. Detected: ${ID:-unknown}"
fi
log_info "Detected Ubuntu ${VERSION_ID:-unknown}"
}
check_wsl2() {
if [[ ! -f /proc/version ]]; then
log_warn "Cannot verify WSL environment"
return
fi
if grep -qi microsoft /proc/version; then
log_info "Running in WSL environment"
else
log_warn "Not running in WSL. This script is designed for WSL2."
fi
}
install_docker() {
log_info "Checking Docker installation..."
if command -v docker >/dev/null 2>&1; then
log_info "Docker already installed: $(docker --version)"
else
log_info "Installing Docker..."
sudo apt-get update -q
sudo apt-get install -y \
ca-certificates \
curl \
gnupg \
lsb-release \
software-properties-common
sudo install -m 0755 -d /etc/apt/keyrings
if [[ ! -f /etc/apt/keyrings/docker.gpg ]]; then
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
fi
local codename
codename=$(. /etc/os-release && echo "$VERSION_CODENAME")
sudo tee /etc/apt/sources.list.d/docker.list >/dev/null <<EOF
deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $codename stable
EOF
sudo apt-get update -q
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
fi
if ! groups "$USER" | grep -q '\bdocker\b'; then
log_info "Adding user $USER to docker group..."
sudo usermod -aG docker "$USER"
log_warn "A new login session may be needed for docker group membership."
fi
sudo systemctl enable docker
sudo systemctl start docker
}
install_python() {
log_info "Checking Python installation..."
if command -v python3 >/dev/null 2>&1; then
log_info "Python already installed: $(python3 --version)"
else
sudo apt-get update -q
sudo apt-get install -y python3 python3-pip python3-venv python3-full
fi
}
install_utilities() {
log_info "Installing system utilities..."
sudo apt-get update -q
sudo apt-get install -y \
jq \
git \
curl \
wget \
rsync \
net-tools \
dnsutils \
htop \
ncdu \
tree \
unzip \
zip \
httpie \
python3-rich \
pipx
}
configure_docker_wsl2() {
log_info "Configuring Docker for WSL..."
local docker_config_dir="/etc/docker"
local docker_config_file="$docker_config_dir/daemon.json"
if [[ ! -f "$docker_config_file" ]]; then
sudo mkdir -p "$docker_config_dir"
sudo tee "$docker_config_file" >/dev/null <<'EOF'
{
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"features": {
"containerd-snapshotter": true
},
"iptables": false
}
EOF
sudo systemctl restart docker
fi
local bashrc_file="$HOME/.bashrc"
if ! grep -q 'WSL Docker helpers' "$bashrc_file" 2>/dev/null; then
cat >> "$bashrc_file" <<'EOF'
# WSL Docker helpers
export DOCKER_HOST=unix:///var/run/docker.sock
EOF
fi
}
handle_windows_paths() {
log_info "Ensuring project symlink exists..."
if [[ ! -L "$HOME/ableton-mcp-ai" ]]; then
ln -sfn "$PROJECT_ROOT" "$HOME/ableton-mcp-ai"
fi
}
install_python_dependencies() {
log_info "Preparing local virtual environment..."
mkdir -p "$RUNTIME_DIR"
if [[ ! -d "$VENV_DIR" ]]; then
python3 -m venv "$VENV_DIR"
fi
# shellcheck disable=SC1091
source "$VENV_DIR/bin/activate"
python -m pip install --upgrade pip
local found_req=false
local requirements_files=(
"$PROJECT_ROOT/MCP_Server/requirements.txt"
"$PROJECT_ROOT/requirements.txt"
)
for req_file in "${requirements_files[@]}"; do
if [[ -f "$req_file" ]]; then
log_info "Installing dependencies from: $req_file"
python -m pip install -r "$req_file"
found_req=true
fi
done
if [[ "$found_req" == "false" ]]; then
log_warn "No requirements.txt files found"
fi
deactivate
}
verify_installation() {
log_info "Verifying installation..."
local all_good=true
if command -v docker >/dev/null 2>&1; then
log_info "OK Docker: $(docker --version)"
else
log_error "FAIL Docker not found"
all_good=false
fi
if docker compose version >/dev/null 2>&1; then
log_info "OK Docker Compose: $(docker compose version)"
else
log_error "FAIL Docker Compose not found"
all_good=false
fi
if command -v python3 >/dev/null 2>&1; then
log_info "OK Python: $(python3 --version)"
else
log_error "FAIL Python3 not found"
all_good=false
fi
if [[ -x "$VENV_DIR/bin/python" ]]; then
log_info "OK Venv: $VENV_DIR"
else
log_error "FAIL Venv not found at $VENV_DIR"
all_good=false
fi
if command -v jq >/dev/null 2>&1; then
log_info "OK jq installed"
else
log_error "FAIL jq not found"
all_good=false
fi
if [[ "$all_good" == "true" ]]; then
log_info "All dependencies installed successfully"
return 0
fi
log_error "Some dependencies failed to install"
return 1
}
main() {
log_info "Starting AbletonMCP-AI WSL installation..."
echo
check_sudo
detect_ubuntu
check_wsl2
echo
install_docker
install_python
install_utilities
configure_docker_wsl2
handle_windows_paths
install_python_dependencies
echo
verify_installation
echo
log_info "Installation complete"
log_info "Next step: run ./setup.sh and then ./start.sh"
}
main "$@"

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
SYSTEMD_DIR="$WSL_DIR/systemd"
if [[ $EUID -ne 0 ]]; then
echo "Run with sudo"
exit 1
fi
for service_file in "$SYSTEMD_DIR"/*.service; do
cp "$service_file" /etc/systemd/system/"$(basename "$service_file")"
done
systemctl daemon-reload
systemctl enable abletonmcp-stack.service abletonmcp-queue-runner.service
echo "Installed systemd units"
echo "Enabled by default: abletonmcp-stack.service, abletonmcp-queue-runner.service"
echo "Optional unit left disabled: abletonmcp-glm-runner.service"

View File

@@ -1,39 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)"
DOCKER_ENV_FILE="$WSL_DIR/.env"
COMPOSE_FILE="$WSL_DIR/docker-compose.yml"
LOGS_DIR="$AUTOMATION_DIR/wsl_runtime/logs"
follow="${1:-all}"
compose_cmd() {
docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@"
}
case "$follow" in
docker)
compose_cmd logs -f
;;
queue)
tail -f "$LOGS_DIR/queue-runner.log"
;;
all)
compose_cmd logs -f &
docker_pid=$!
if [[ -f "$LOGS_DIR/queue-runner.log" ]]; then
tail -f "$LOGS_DIR/queue-runner.log" &
tail_pid=$!
wait "$docker_pid" "$tail_pid"
else
wait "$docker_pid"
fi
;;
*)
echo "Usage: $0 [all|docker|queue]"
exit 1
;;
esac

View File

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
"$SCRIPT_DIR/stop.sh"
sleep 2
"$SCRIPT_DIR/start.sh"

View File

@@ -1,140 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_step() { echo -e "${BLUE}[STEP]${NC} $*"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$AUTOMATION_DIR/.." && pwd)"
RUNTIME_DIR="$AUTOMATION_DIR/wsl_runtime"
DOCKER_ENV_FILE="$WSL_DIR/.env"
RUNNER_ENV_FILE="$AUTOMATION_DIR/wsl.local.env"
PROJECT_LINK="$HOME/ableton-mcp-ai"
generate_secret() {
openssl rand -hex "${1:-16}" 2>/dev/null || python3 - "${1:-16}" <<'PY'
import secrets
import sys
print(secrets.token_hex(int(sys.argv[1])))
PY
PY
}
ensure_dirs() {
log_step "Creating runtime directories"
mkdir -p \
"$RUNTIME_DIR/logs" \
"$RUNTIME_DIR/pids" \
"$RUNTIME_DIR/data" \
"$AUTOMATION_DIR/reports" \
"$AUTOMATION_DIR/runs" \
"$AUTOMATION_DIR/tasks" \
"$AUTOMATION_DIR/workflows" \
"$WSL_DIR/initdb"
}
ensure_symlink() {
if [[ ! -L "$PROJECT_LINK" ]]; then
ln -sfn "$PROJECT_ROOT" "$PROJECT_LINK"
fi
log_info "Project link: $PROJECT_LINK"
}
write_docker_env() {
if [[ -f "$DOCKER_ENV_FILE" ]]; then
log_info "Docker env already exists: $DOCKER_ENV_FILE"
return
fi
log_step "Generating docker env"
cat > "$DOCKER_ENV_FILE" <<EOF
PROJECT_PATH='$PROJECT_ROOT'
TZ='America/Buenos_Aires'
POSTGRES_USER='postgres'
POSTGRES_PASSWORD='$(generate_secret 16)'
POSTGRES_BOOTSTRAP_DB='postgres'
POSTGRES_PORT='5432'
GITEA_DOMAIN='localhost'
GITEA_ROOT_URL='http://localhost:3000'
GITEA_HTTP_PORT='3000'
GITEA_SSH_DOMAIN='localhost'
GITEA_SSH_PORT='222'
GITEA_ADMIN_USER='giteaadmin'
GITEA_ADMIN_PASSWORD='$(generate_secret 16)'
GITEA_ADMIN_EMAIL='admin@localhost'
GITEA_DB_NAME='gitea'
GITEA_SECURITY_INSTALL_LOCK='true'
GITEA_OFFLINE_MODE='true'
REDIS_PASSWORD='$(generate_secret 16)'
REDIS_PORT='6379'
N8N_HOST='localhost'
N8N_PORT='5678'
N8N_PATH='/'
N8N_WEBHOOK_URL='http://localhost:5678/'
N8N_EDITOR_BASE_URL='http://localhost:5678'
N8N_DB_NAME='n8n'
N8N_ENCRYPTION_KEY='$(generate_secret 32)'
N8N_BASIC_AUTH_ACTIVE='true'
N8N_BASIC_AUTH_USER='admin'
N8N_BASIC_AUTH_PASSWORD='$(generate_secret 16)'
N8N_HOST_ALLOW_LIST='localhost,127.0.0.1'
N8N_EXECUTIONS_MODE='regular'
N8N_LOG_LEVEL='info'
N8N_DIAGNOSTICS_ENABLED='false'
N8N_VERSION_NOTIFICATIONS_ENABLED='false'
N8N_COOKIE_POLICY='lax'
COMPOSE_PROJECT_NAME='abletonmcp'
EOF
chmod 600 "$DOCKER_ENV_FILE"
}
ensure_runner_env() {
if [[ -f "$RUNNER_ENV_FILE" ]]; then
log_info "Runner env already exists: $RUNNER_ENV_FILE"
return
fi
log_step "Generating runner env"
cat > "$RUNNER_ENV_FILE" <<EOF
export ANTHROPIC_BASE_URL=''
export ANTHROPIC_AUTH_TOKEN=''
export GLM_MODEL='glm-5'
export GLM_API_KEY=''
export CODEX_MODEL='gpt-5.4'
export TELEGRAM_BOT_TOKEN=''
export TELEGRAM_CHAT_ID=''
export CODEX_HOME='$AUTOMATION_DIR/wsl_runtime/codex_home'
export GLM_AGENTS_FILE='$AUTOMATION_DIR/glm_agents.team.json'
export POLL_SECONDS='30'
export WATCH='1'
export CONTINUE_ON_ERROR='1'
EOF
chmod 600 "$RUNNER_ENV_FILE"
}
main() {
log_info "Preparing AbletonMCP_AI WSL stack"
ensure_dirs
ensure_symlink
write_docker_env
ensure_runner_env
echo
log_info "Files ready:"
echo " - $DOCKER_ENV_FILE"
echo " - $RUNNER_ENV_FILE"
echo
log_info "Next:"
echo " 1. Review tokens in $RUNNER_ENV_FILE"
echo " 2. Review service passwords in $DOCKER_ENV_FILE"
echo " 3. Run ./install.sh if Docker is not installed"
echo " 4. Run ./start.sh"
}
main "$@"

View File

@@ -1,143 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_error() { echo -e "${RED}[ERROR]${NC} $*"; }
log_step() { echo -e "${BLUE}[STEP]${NC} $*"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$AUTOMATION_DIR/.." && pwd)"
RUNNER_ENV_FILE="$AUTOMATION_DIR/wsl.local.env"
DOCKER_ENV_FILE="$WSL_DIR/.env"
COMPOSE_FILE="$WSL_DIR/docker-compose.yml"
RUNTIME_DIR="$AUTOMATION_DIR/wsl_runtime"
LOGS_DIR="$RUNTIME_DIR/logs"
PID_DIR="$RUNTIME_DIR/pids"
START_QUEUE_RUNNER="${START_QUEUE_RUNNER:-1}"
mkdir -p "$LOGS_DIR" "$PID_DIR"
if [[ -f "$RUNNER_ENV_FILE" ]]; then
# shellcheck disable=SC1090
source "$RUNNER_ENV_FILE"
fi
if [[ -f "$DOCKER_ENV_FILE" ]]; then
# shellcheck disable=SC1090
set -a
source "$DOCKER_ENV_FILE"
set +a
fi
compose_cmd() {
docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@"
}
check_prerequisites() {
log_step "Checking prerequisites"
command -v docker >/dev/null || { log_error "Docker is not installed"; exit 1; }
docker compose version >/dev/null || { log_error "Docker Compose plugin is not available"; exit 1; }
docker info >/dev/null || { log_error "Docker daemon is not running"; exit 1; }
[[ -f "$DOCKER_ENV_FILE" ]] || { log_error "Missing docker env: $DOCKER_ENV_FILE"; exit 1; }
[[ -f "$COMPOSE_FILE" ]] || { log_error "Missing compose file: $COMPOSE_FILE"; exit 1; }
}
wait_for_postgres() {
log_info "Waiting for PostgreSQL"
for _ in $(seq 1 60); do
if compose_cmd exec -T postgres pg_isready -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_BOOTSTRAP_DB:-postgres}" >/dev/null 2>&1; then
return 0
fi
sleep 2
done
log_error "PostgreSQL did not become ready in time"
exit 1
}
wait_for_service_http() {
local service="$1"
local url="$2"
log_info "Waiting for $service"
for _ in $(seq 1 60); do
if curl -fsS "$url" >/dev/null 2>&1; then
return 0
fi
sleep 2
done
log_warn "$service is not healthy yet: $url"
return 1
}
ensure_database() {
local db_name="$1"
if compose_cmd exec -T postgres psql -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_BOOTSTRAP_DB:-postgres}" -tAc "SELECT 1 FROM pg_database WHERE datname='${db_name}'" | grep -q 1; then
return 0
fi
compose_cmd exec -T postgres psql -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_BOOTSTRAP_DB:-postgres}" -c "CREATE DATABASE \"${db_name}\""
}
ensure_gitea_admin() {
local user="${GITEA_ADMIN_USER:-giteaadmin}"
local password="${GITEA_ADMIN_PASSWORD:-changeme}"
local email="${GITEA_ADMIN_EMAIL:-admin@localhost}"
if compose_cmd exec -T gitea sh -c "HOME=/tmp /usr/local/bin/gitea admin user list 2>/dev/null | awk 'NR > 1 && \$2 == \"${user}\" { found=1 } END { exit found ? 0 : 1 }'"; then
return 0
fi
compose_cmd exec -T gitea sh -c "HOME=/tmp /usr/local/bin/gitea admin user create --admin --username '${user}' --password '${password}' --email '${email}' --must-change-password=false" >/dev/null 2>&1 || log_warn "Could not auto-create Gitea admin user; complete first-run in UI if needed"
}
start_docker_stack() {
log_step "Starting Docker services"
compose_cmd up -d postgres redis
wait_for_postgres
ensure_database "${GITEA_DB_NAME:-gitea}"
ensure_database "${N8N_DB_NAME:-n8n}"
compose_cmd up -d gitea n8n
wait_for_service_http "Gitea" "http://localhost:${GITEA_HTTP_PORT:-3000}/api/healthz" || true
wait_for_service_http "n8n" "http://localhost:${N8N_PORT:-5678}/healthz" || true
ensure_gitea_admin
}
start_queue_runner() {
if [[ "$START_QUEUE_RUNNER" != "1" ]]; then
log_info "Queue runner startup skipped by START_QUEUE_RUNNER=$START_QUEUE_RUNNER"
return
fi
if command -v systemctl >/dev/null 2>&1 && systemctl is-active abletonmcp-queue-runner.service >/dev/null 2>&1; then
log_info "Queue runner already managed by systemd"
return
fi
local pid_file="$PID_DIR/queue-runner.pid"
if [[ -f "$pid_file" ]] && kill -0 "$(cat "$pid_file")" 2>/dev/null; then
log_info "Queue runner already running"
return
fi
log_step "Starting autonomous queue runner"
nohup bash "$WSL_DIR/run_task_queue.sh" > "$LOGS_DIR/queue-runner.log" 2>&1 &
echo $! > "$pid_file"
log_info "Queue runner PID: $(cat "$pid_file")"
}
main() {
check_prerequisites
start_docker_stack
start_queue_runner
echo
log_info "Stack started"
echo " Gitea: http://localhost:${GITEA_HTTP_PORT:-3000}"
echo " n8n: http://localhost:${N8N_PORT:-5678}"
}
main "$@"

View File

@@ -1,58 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'
ok() { echo -e "${GREEN}OK${NC} $*"; }
warn() { echo -e "${YELLOW}WARN${NC} $*"; }
fail() { echo -e "${RED}FAIL${NC} $*"; }
step() { echo -e "${BLUE}$*${NC}"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)"
DOCKER_ENV_FILE="$WSL_DIR/.env"
COMPOSE_FILE="$WSL_DIR/docker-compose.yml"
PID_DIR="$AUTOMATION_DIR/wsl_runtime/pids"
LOGS_DIR="$AUTOMATION_DIR/wsl_runtime/logs"
compose_cmd() {
docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@"
}
step "Docker"
if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then
ok "docker daemon running"
else
fail "docker daemon unavailable"
fi
echo
step "Compose services"
if command -v docker >/dev/null 2>&1 && [[ -f "$COMPOSE_FILE" ]]; then
compose_cmd ps || true
else
warn "compose file or docker missing"
fi
echo
step "Queue runner"
if [[ -f "$PID_DIR/queue-runner.pid" ]] && kill -0 "$(cat "$PID_DIR/queue-runner.pid")" 2>/dev/null; then
ok "queue runner PID $(cat "$PID_DIR/queue-runner.pid")"
elif command -v systemctl >/dev/null 2>&1 && systemctl is-active abletonmcp-queue-runner.service >/dev/null 2>&1; then
ok "queue runner managed by systemd"
else
warn "queue runner not running"
fi
echo
step "Logs"
if [[ -d "$LOGS_DIR" ]]; then
ls -1 "$LOGS_DIR" | sed 's/^/ - /'
else
warn "no logs directory"
fi

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $*"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
log_step() { echo -e "${BLUE}[STEP]${NC} $*"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)"
DOCKER_ENV_FILE="$WSL_DIR/.env"
COMPOSE_FILE="$WSL_DIR/docker-compose.yml"
PID_DIR="$AUTOMATION_DIR/wsl_runtime/pids"
compose_cmd() {
docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@"
}
stop_runner() {
local pid_file="$1"
if [[ ! -f "$pid_file" ]]; then
return
fi
local pid
pid="$(cat "$pid_file")"
if kill -0 "$pid" 2>/dev/null; then
kill -TERM "$pid" 2>/dev/null || true
sleep 2
kill -KILL "$pid" 2>/dev/null || true
fi
rm -f "$pid_file"
}
main() {
log_step "Stopping queue runner"
stop_runner "$PID_DIR/queue-runner.pid"
echo
log_step "Stopping Docker services"
if command -v docker >/dev/null 2>&1; then
compose_cmd down "$@" || true
else
log_warn "Docker not installed"
fi
log_info "Stack stopped"
}
main "$@"

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}"
if [[ -f "$LOCAL_ENV_FILE" ]]; then
# shellcheck disable=SC1090
source "$LOCAL_ENV_FILE"
fi
MESSAGE="${1:-}"
if [[ -z "$MESSAGE" ]]; then
exit 0
fi
BOT_TOKEN="${TELEGRAM_BOT_TOKEN:-}"
CHAT_ID="${TELEGRAM_CHAT_ID:-}"
if [[ -z "$BOT_TOKEN" || -z "$CHAT_ID" ]]; then
exit 0
fi
curl -fsS -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \
--data-urlencode "chat_id=${CHAT_ID}" \
--data-urlencode "text=${MESSAGE}" \
--data "disable_web_page_preview=true" >/dev/null

View File

@@ -1,107 +0,0 @@
#!/usr/bin/env python
"""
Script to load samples into Ableton MCP AI
This script guides the user through loading samples manually or via the browser
"""
import os

# Sample configuration
SAMPLES_CONFIG = {
    "kick": r"C:\Users\ren\embeddings\all_tracks\BBH - Primer Impacto - Kick 1.wav",
    "clap": r"C:\Users\ren\embeddings\all_tracks\MT Clap & Snare Hit 05.wav",
    "hat": r"C:\Users\ren\embeddings\all_tracks\BBH - Primer Impacto - Closed Hat 3.wav",
    "bass": r"C:\Users\ren\embeddings\all_tracks\MT_Bass Loop 04 F 125.wav",
}

def generate_instrument_setup_guide():
    """Generates detailed instructions for loading samples"""
    guide = """
╔══════════════════════════════════════════════════════════════════╗
║                 INSTRUMENT SETUP - 90s HOUSE                      ║
╚══════════════════════════════════════════════════════════════════╝
For your track to make sound, you need to load instruments on each MIDI track.
🥁 TRACK 0 - KICK (Red):
1. Drag "Drum Rack" from the browser onto the track
2. Drag your kick sample onto pad C1 (note 36)
3. Set volume to -3dB
👏 TRACK 1 - CLAP (Orange):
1. Same Drum Rack or a new one
2. Drag a clap/snare sample onto pad D2 (note 50)
3. Volume to -6dB
🎩 TRACK 2 - HIHAT (Yellow):
1. Drum Rack
2. Closed hat sample onto pad F#1 (note 42)
3. Volume to -12dB
🎸 TRACK 3 - BASS (Blue):
Option A (Sampler):
1. Drag "Simpler" onto the track
2. Drag a bass loop (MT_Bass Loop 04 F 125.wav)
3. Adjust so C3 triggers the sample
Option B (Synth):
1. Load "Operator"
2. "Sub Bass" or "Funky Bass" preset
3. Adjust the envelope: Attack 5ms, Decay 200ms, Sustain 80%
🎹 TRACK 4 - CHORDS (Purple):
1. Load "Wavetable" or "Analog"
2. "House Chords", "Chord Stab" or "Vintage Keys" preset
3. Add reverb (Return A) at 20%
═══════════════════════════════════════════════════════════════════
📁 RECOMMENDED SAMPLES FROM YOUR LIBRARY:
Kick: BBH - Primer Impacto - Kick 1.wav
Clap: MT Clap & Snare Hit 05.wav
Hat: BBH - Primer Impacto - Closed Hat 3.wav
Bass: MT_Bass Loop 04 F 125.wav
═══════════════════════════════════════════════════════════════════
⚡ QUICK SHORTCUT:
If you have saved Drum Rack presets:
1. Search the browser: "Drums > Drum Rack"
2. Drag one onto each drum track
3. The MIDI clips are already programmed and will play automatically
═══════════════════════════════════════════════════════════════════
"""
    return guide

def verify_samples():
    """Checks which samples exist"""
    samples_dir = r"C:\Users\ren\embeddings\all_tracks"
    print("\n📂 Checking samples in library...")
    print(f"Directory: {samples_dir}")
    print("-" * 50)
    if not os.path.exists(samples_dir):
        print("❌ Directory not found!")
        return False
    # Look for common files
    found = []
    for f in os.listdir(samples_dir)[:20]:  # First 20 entries
        if f.endswith('.wav'):
            found.append(f)
    print(f"{len(found)} WAV files found")
    print("\nExamples:")
    for f in found[:10]:
        print(f"  - {f}")
    return True

if __name__ == "__main__":
    print(generate_instrument_setup_guide())
    verify_samples()
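As a cross-check of the pad notes cited in the guide (C1 = 36, F#1 = 42, D2 = 50): they follow Ableton's naming, where middle C (C3) is MIDI note 60. A minimal sketch, assuming that convention (helper names are illustrative, not part of this script):

```python
NOTE_OFFSETS = {"C": 0, "C#": 1, "D": 2, "D#": 3, "E": 4, "F": 5,
                "F#": 6, "G": 7, "G#": 8, "A": 9, "A#": 10, "B": 11}

def note_to_midi(name: str, octave: int) -> int:
    """Note name + octave to MIDI number, using Ableton's C3 = 60 convention."""
    return 60 + NOTE_OFFSETS[name] + (octave - 3) * 12

assert note_to_midi("C", 1) == 36   # kick pad
assert note_to_midi("F#", 1) == 42  # closed hat pad
assert note_to_midi("D", 2) == 50   # clap pad
```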

View File

@@ -1,215 +0,0 @@
# AbletonMCP-AI — Tech House Professional DJ Roadmap
> Repository: AbletonMCP_AI | Focus: **Tech House** (122-128 BPM)
> Last updated: 2026-03-28
---
## Current system state
The system has a solid foundation:
- `song_generator.py` (~6k lines): section blueprints, arrangement profiles, bus system, gain calibration, per-section device automation
- `vector_manager.py`: semantic search + duration filters (avoids full songs)
- `role_matcher.py`: per-role sample validation with thresholds and penalties
- `sample_selector.py`: sample selection with BPM/key compatibility
- `audio_resampler.py`: layer resampling and analysis
- `server.py` (~7k lines): MCP tools exposed to the AI
---
## 🔴 BUG FIXES — Critical priority
### BF-01: "AUDIO ATMOS 2" track loads a full song
- **Problem**: The duration filter (max 45s) is not applied consistently across all loading paths
- **Cause**: `_build_audio_fallback_sample_paths` in `server.py` has glob patterns that bypass the `vector_manager` filter
- **Fix**: Reindex the library with `reindex_library` and verify that `_find_library_file` rejects files where `soundfile` raises an exception (duration = -1)
- **File**: `server.py` → `_find_library_file`, `_build_audio_fallback_sample_paths`
### BF-02: Stale embeddings after library changes
- **Problem**: When new samples are added to the library, the `.sample_embeddings.json` index is not rebuilt automatically
- **Fix**: Add a directory fingerprint hash (mtime or file count) to the index and validate it on every startup (see the sketch below)
- **File**: `vector_manager.py` → `_load_or_build_index`
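A minimal sketch of the fingerprint check, assuming the index is a JSON-style dict that can carry an extra `fingerprint` key (names are illustrative, not the actual `vector_manager.py` internals):

```python
import hashlib
import os

def library_fingerprint(library_dir: str) -> str:
    """Hash every file path and mtime, so adds, removals, and edits all change it."""
    digest = hashlib.sha256()
    for root, _dirs, files in sorted(os.walk(library_dir)):
        for name in sorted(files):
            path = os.path.join(root, name)
            digest.update(path.encode("utf-8"))
            digest.update(str(os.path.getmtime(path)).encode("utf-8"))
    return digest.hexdigest()

def index_is_stale(index: dict, library_dir: str) -> bool:
    # Rebuild whenever the stored fingerprint is missing or no longer matches.
    return index.get("fingerprint") != library_fingerprint(library_dir)
```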
### BF-03: "AUDIO X 2" track name collision
- **Problem**: When a second track is created with the same name, Ableton automatically appends "2", and the system then fails to find the track by name
- **Fix**: Normalize track lookups to use the numeric index instead of the name as the primary identifier
- **File**: `server.py` → `_mute_tracks_for_audio_layers`, `_normalize_track_name`
### BF-04: Remaining linting errors
- **Problem**: Multiple ruff errors reported in `ruff_errors.txt` (mainly F821 undefined names, E501 line length)
- **Fix**: Run `ruff check --fix` and review the F821s manually
- **Files**: `server.py`, `song_generator.py`
### BF-05: Silent `soundfile` exception lets invalid files through
- **Problem**: If `soundfile` fails to read a file, `duration = -1` is assigned, but the file can still be inserted in fallback scenarios
- **Fix**: In `_find_library_file`, `duration < 0` must also be rejected explicitly in the `glob` fallback branch
- **File**: `server.py` → `_find_library_file`
---
## 🟠 CORE IMPROVEMENTS — Tech House specific
### MJ-01: Section blueprints optimized for Tech House DJing
- **What**: The current blueprints (`standard`, `extended`, `club`) are generic. Tech House DJ sets need 16-32 bar intros/outros for beatmatching
- **Change**:
```python
'tech-house-dj': [
    ('INTRO DJ', 32, 8, 'intro', 1),   # 32 bars of just kick+bass for mixing in
    ('GROOVE A', 16, 16, 'build', 2),
    ('VOX TEASE', 8, 20, 'build', 3),
    ('DROP A', 32, 30, 'drop', 5),
    ('BREAK', 8, 22, 'break', 1),
    ('BUILD', 8, 24, 'build', 3),
    ('DROP B', 32, 32, 'drop', 5),
    ('OUTRO DJ', 32, 8, 'outro', 1),   # 32 bars of just kick+bass for mixing out
]
```
- **File**: `song_generator.py` → `SECTION_BLUEPRINTS`
### MJ-02: Dedicated tech house rhythm patterns
- **What**: The kick/hat/perc patterns in `create_drum_pattern` (server.py) are generic presets. Tech House uses swing, offbeat hats, and kicks with ghost notes
- **Change**: Add `'tech-house-swing'`, `'tech-house-jackin'`, `'tech-house-minimal'` presets with (see the sketch after this list):
  - Kick on 1 and 3 with variations on 2.5 and 3.5
  - Hi-hat with 16% swing and offbeats on 1/8
  - Clap/snare on 2 and 4 with ghost notes
- **File**: `server.py` → `create_drum_pattern`
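A minimal sketch of the swung offbeat hats, assuming a pattern is a list of `(beat_position, velocity)` pairs; the real preset format in `create_drum_pattern` may differ:

```python
def swung_offbeat_hats(bars: int = 1, swing: float = 0.16) -> list[tuple[float, int]]:
    """Offbeat 1/8 hi-hats; each offbeat is pushed late by `swing` of a 1/16 step."""
    notes = []
    for bar in range(bars):
        for beat in range(4):
            # The "and" of each beat, delayed by swing * a 16th note (0.25 beats).
            position = bar * 4 + beat + 0.5 + swing * 0.25
            notes.append((position, 90))
    return notes

# e.g. swung_offbeat_hats() -> [(0.54, 90), (1.54, 90), (2.54, 90), (3.54, 90)]
```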
### MJ-03: Tech house bass lines
- **What**: `create_bassline` generates 4 generic styles. Tech House needs syncopated, groovy basslines
- **Change**: Add a `'tech-house'` style with off-beat note placement, slides, and velocity variation for groove
- **File**: `server.py` → `create_bassline`
### MJ-04: Tech house chord progressions
- **What**: `CHORD_PROGRESSIONS` in `song_generator.py` has no tech-house-specific entries
- **Change**: Add progressions:
  - Am → Fm → Gm (dark, hypnotic)
  - Dm → Am → Dm (two-chord loop for the drop)
  - Cm → Gm (minimalist, with tension)
- **File**: `song_generator.py` → `CHORD_PROGRESSIONS`
### MJ-05: Latin Tech House style
- **What**: The system mentions `latin-industrial` (Eli Brown) but has no Latin percussion patterns implemented
- **Change**: Add a `'latin-tech-house'` preset with:
  - Conga / bongo patterns as a perc layer
  - Bass with syncopated, afro-percussive note placement
  - Vocal shots ("ey", "come on") on offbeats
- **Files**: `song_generator.py`, `server.py`
### MJ-06: Genre keyword expansion in VectorManager
- **What**: Semantic searches use generic strings. Tech house has a vocabulary of its own
- **Change**: Add a per-genre dictionary of preferred terms that enrich the query:
```python
GENRE_SEARCH_TERMS = {
    'tech-house': ['groovy', 'driving', 'punchy', 'jackin', 'swinging', 'hypnotic'],
    'house': ['deep', 'soulful', 'warm', 'classic'],
    ...
}
```
- **File**: `vector_manager.py` or `server.py`
### MJ-07: Automatic reindex when the library changes
- **What**: The embeddings index is only rebuilt manually. If the user adds samples, the changes go undetected
- **Change**: On `VectorManager` startup, compare the current file count against the one stored in the index. If they differ, rebuild automatically (the fingerprint sketch under BF-02 covers this case too)
- **File**: `vector_manager.py` → `_load_or_build_index`
---
## 🟡 DJ PRO IMPROVEMENTS — Professional DJ features
### DJ-01: Track stems export / visible bus routing
- **What**: A professional DJ needs to export separate stems (kick, bass, music, fx)
- **Change**: Add an `export_stems_config()` tool that configures the buses for individual stem export, naming and coloring each bus consistently
- **File**: `server.py` (new tool)
### DJ-02: Harmonic mixing — Camelot wheel
- **What**: The system picks keys but does not check Camelot wheel compatibility for harmonic mixing
- **Change**: Add a `get_compatible_keys(current_key)` function that returns the compatible keys on the Camelot wheel (±1 step, relative major/minor) and use it in `suggest_key_change` (see the sketch below)
- **File**: `server.py` → `suggest_key_change`
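A minimal sketch of the Camelot neighbor rule, working on Camelot codes such as `8A` (Am); mapping musical keys to codes would sit alongside it:

```python
def camelot_neighbors(code: str) -> list[str]:
    """Compatible codes: same number with the other letter, plus ±1 with the same letter."""
    number = int(code[:-1])       # 1..12 around the wheel
    letter = code[-1].upper()     # 'A' = minor, 'B' = major
    other = "B" if letter == "A" else "A"
    up = number % 12 + 1          # 12 wraps to 1
    down = (number - 2) % 12 + 1  # 1 wraps to 12
    return [f"{number}{other}", f"{up}{letter}", f"{down}{letter}"]

# e.g. camelot_neighbors("8A") -> ['8B', '9A', '7A']
```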
### DJ-03: Automatic BPM grid — Sync markers
- **What**: When generating a song with a 32-bar DJ intro, place Ableton markers (`locators`) at the exact start of each section so the DJ can jump between cue points
- **Change**: Use the Ableton API `create_arrangement_locator` command to mark each section
- **Files**: `server.py`, `Remote_Script.py` (add a socket command)
### DJ-04: Automatic loop regions
- **What**: Mark the drops as loop regions in Ableton so the DJ can engage the loop with a single button
- **Change**: When generating the song, place punch-in / punch-out points on the main drops
- **File**: `server.py`
### DJ-05: Explicit energy curve
- **What**: The system has `ROLE_ACTIVITY` with per-section energy values, but no curve is visible to the user
- **Change**: When generation finishes, print the section-by-section energy curve (in the manifest): `[INTRO: 25%] → [BUILD: 70%] → [DROP: 100%]...`
- **File**: `server.py` → manifest / `get_generation_manifest`
### DJ-06: Real reference track — Eli Brown style
- **What**: `REFERENCE_TRACK_PROFILES` defines "Eli Brown - Me Gusta", but a reference cannot be loaded automatically for A/B analysis
- **Change**: Make the reference system functional: if the user drops a file into `librerias/reference/`, it should be analyzed and influence the BPM, key, and energy curve of the generation
- **Files**: `server.py`, `audio_resampler.py`, `reference_listener.py`
---
## 🟢 NICE TO HAVE — Quality of life
### NTH-01: Song preview before generating
- **What**: The system generates everything in one shot with no preview. It should be possible to inspect the "blueprint" first (which tracks, which samples, which structure) before executing
- **Change**: Add `preview_generation(genre, style, key, bpm)` that returns the manifest without creating anything in Ableton
- **Files**: `server.py`, `song_generator.py`
### NTH-02: Selective section regeneration
- **What**: If the drop turns out badly, everything has to be regenerated. It should be possible to regenerate only the drop
- **Change**: Add `regenerate_section(section_name)` that deletes that section's clips and regenerates them
- **File**: `server.py`
### NTH-03: Generation history
- **What**: Only the last manifest is kept. There should be a history of the last 5 generations
- **Change**: Save manifests as timestamped `.json` files in `librerias/generations/`
- **File**: `server.py` → `_store_generation_manifest`
### NTH-04: Consistent per-genre color coding
- **What**: Track colors are static. Tech House could have its own palette (orange, dark blue)
- **Change**: Add `GENRE_COLOR_PALETTES` and apply it when generating tracks
- **File**: `song_generator.py` → `TRACK_COLORS`
### NTH-05: Improved sample diversity
- **What**: If the library has only 3 kicks, the system can use the same kick in 2 consecutive generations
- **Change**: `reset_diversity_memory` exists, but there is no persistence between sessions. Store the history of used samples in a local JSON (see the sketch below)
- **File**: `sample_selector.py`
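A minimal sketch of the persistence idea, assuming the memory is a dict of sample path → use count (the file name and helpers are illustrative, not the current `sample_selector.py` internals):

```python
import json
import os

def load_memory(path: str = "diversity_memory.json") -> dict:
    """Load the used-sample counts, or start fresh if the file does not exist."""
    if os.path.exists(path):
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    return {}

def record_use(memory: dict, sample_path: str, path: str = "diversity_memory.json") -> None:
    """Increment a sample's use count and write the file atomically."""
    memory[sample_path] = memory.get(sample_path, 0) + 1
    tmp = path + ".tmp"
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(memory, f, indent=2)
    os.replace(tmp, path)  # atomic swap so a crash never leaves a half-written file
```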
### NTH-06: Real-time routing validation
- **What**: `validate_set` exists but is not called automatically during generation
- **Change**: When `generate_track` finishes, automatically call `detect_common_issues` and show a summary with the number of errors/warnings
- **File**: `server.py` → `generate_track`
### NTH-07: howto.md updated for Tech House
- **What**: `howto.md` documents the generic system. Add a dedicated "How to generate professional Tech House" section with example prompts, DJ workflows, and recommended settings
- **File**: `howto.md`
---
## Recommended execution order
| Priority | ID | Name | Effort |
|---|---|---|---|
| 1 | BF-01 | Full song on ATMOS track | 1h |
| 2 | BF-02 | Embeddings auto-rebuild | 2h |
| 3 | BF-03 | Track name collisions | 2h |
| 4 | MJ-01 | DJ blueprints with 32-bar intro/outro | 1h |
| 5 | MJ-02 | Tech house drum patterns | 2h |
| 6 | DJ-02 | Camelot wheel | 2h |
| 7 | MJ-03 | Tech house bassline | 1h |
| 8 | MJ-05 | Latin tech house preset | 3h |
| 9 | DJ-03 | Automatic BPM locators | 4h |
| 10 | DJ-06 | Real reference track | 4h |
| 11 | NTH-01 | Pre-generation preview | 3h |
| 12 | NTH-06 | Post-generation auto-validation | 1h |
| 13 | BF-04 | Linting cleanup | 2h |
---
## Architectural notes for the move to Tech House
1. **Default BPM**: change the `tech-house` `default_bpm` from 125 to **126** (the genre's current sweet spot)
2. **Key pool**: prioritize `Am`, `Fm`, `Dm` → darker and groovier than the current options
3. **Swing**: the current 8% swing on hats is not enough. Modern Tech House uses 12-16%
4. **Sidechain pump**: the current -22dB threshold on the bass bus is right, but the 0.12s release is slow. Lower it to 0.08-0.10s for more pump
5. **Atmos tracks**: the 0.50 volume in `ROLE_MIX['atmos']` is right, but use steep HPFs (>1kHz) on them so they don't compete with the sub

View File

@@ -1,227 +0,0 @@
"""
Setup Returns and Master Chain for Ableton Live 12
Creates return tracks and configures master chain
"""
import socket
import json
import time
import os
from datetime import datetime
from typing import Dict, Any
LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\returns_master.txt"
def log_message(message):
"""Log message to file and console"""
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_line = f"[{timestamp}] {message}"
print(log_line)
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
with open(LOG_FILE, "a", encoding="utf-8") as f:
f.write(log_line + "\n")
class AbletonClient:
def __init__(self, host="127.0.0.1", port=9877, timeout=15.0):
self.host = host
self.port = port
self.timeout = timeout
def send(self, command_type: str, params: Dict[str, Any] = None) -> Dict[str, Any]:
"""Send command to Ableton runtime and get response"""
payload = json.dumps({
"type": command_type,
"params": params or {},
}).encode("utf-8") + b"\n"
with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
sock.sendall(payload)
reader = sock.makefile("r", encoding="utf-8")
try:
line = reader.readline()
finally:
reader.close()
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
if not line:
return {"status": "error", "message": f"No response for command: {command_type}"}
return json.loads(line)
def main():
log_message("=" * 60)
log_message("STARTING RETURNS AND MASTER CHAIN SETUP")
log_message("=" * 60)
client = AbletonClient()
try:
# Get current session info
log_message("\n--- Getting session info ---")
session_info = client.send("get_session_info")
log_message(f"Session: tracks={session_info.get('result', {}).get('num_tracks', 'N/A')}, returns={session_info.get('result', {}).get('num_return_tracks', 'N/A')}")
# ========================================
# CREATE RETURN TRACKS
# ========================================
log_message("\n" + "=" * 60)
log_message("CREATING RETURN TRACKS")
log_message("=" * 60)
# 1. A-REVERB (Large Hall)
log_message("\n--- Creating A-REVERB return track ---")
reverb_response = client.send("setup_return_track", {
"preset": "reverb_large",
"name": "A-REVERB"
})
log_message(f"A-REVERB: {reverb_response.get('status')} - {json.dumps(reverb_response.get('result', reverb_response.get('message')), indent=2)}")
if reverb_response.get("status") == "success":
return_index = reverb_response.get("result", {}).get("index", 0)
log_message(f"Setting A-REVERB volume to 0.70...")
vol_response = client.send("set_track_volume", {
"track_index": return_index,
"volume": 0.70,
"track_type": "return"
})
log_message(f"Volume set: {vol_response.get('status')}")
# 2. B-DELAY (Ping Pong)
log_message("\n--- Creating B-DELAY return track ---")
delay_response = client.send("setup_return_track", {
"preset": "delay_pingpong",
"name": "B-DELAY"
})
log_message(f"B-DELAY: {delay_response.get('status')} - {json.dumps(delay_response.get('result', delay_response.get('message')), indent=2)}")
if delay_response.get("status") == "success":
return_index = delay_response.get("result", {}).get("index", 1)
log_message(f"Setting B-DELAY volume to 0.65...")
vol_response = client.send("set_track_volume", {
"track_index": return_index,
"volume": 0.65,
"track_type": "return"
})
log_message(f"Volume set: {vol_response.get('status')}")
# 3. C-COMPRESSOR (Parallel compression for sidechain pumping)
log_message("\n--- Creating C-COMPRESSOR return track ---")
comp_response = client.send("setup_return_track", {
"preset": "parallel_comp",
"name": "C-COMPRESSOR"
})
log_message(f"C-COMPRESSOR: {comp_response.get('status')} - {json.dumps(comp_response.get('result', comp_response.get('message')), indent=2)}")
if comp_response.get("status") == "success":
return_index = comp_response.get("result", {}).get("index", 2)
log_message(f"Setting C-COMPRESSOR volume to 0.80...")
vol_response = client.send("set_track_volume", {
"track_index": return_index,
"volume": 0.80,
"track_type": "return"
})
log_message(f"Volume set: {vol_response.get('status')}")
# ========================================
# MASTER CHAIN SETUP
# ========================================
log_message("\n" + "=" * 60)
log_message("SETTING UP MASTER CHAIN")
log_message("=" * 60)
# Get current master devices
log_message("\n--- Getting current master devices ---")
master_devices = client.send("get_devices", {
"track_type": "master",
"track_index": 0
})
devices_list = master_devices.get("result", [])
if isinstance(devices_list, list):
log_message(f"Current master devices: {[d.get('name', '?') if isinstance(d, dict) else str(d) for d in devices_list]}")
else:
log_message(f"Master devices response: {master_devices}")
# Setup master chain
log_message("\n--- Loading master chain devices ---")
client.timeout = 30.0
master_chain_response = client.send("setup_master_chain", {
"devices": ["Utility", "EQ Eight", "Compressor", "Limiter"],
"parameters": {
"Utility": {
"Gain": 0.0
},
"EQ Eight": {
"Mode": "Stereo"
},
"Compressor": {
"Threshold": -18.0,
"Ratio": 2.0,
"Attack": 10.0,
"Release": 80.0,
"Makeup": 2.0
},
"Limiter": {
"Ceiling": -0.3,
"Release": 50.0
}
}
})
log_message(f"Master chain: {master_chain_response.get('status')} - {json.dumps(master_chain_response.get('result', master_chain_response.get('message')), indent=2)}")
# Set master volume to 0.85
log_message("\n--- Setting master volume to 0.85 ---")
master_vol_response = client.send("set_track_volume", {
"track_index": 0,
"volume": 0.85,
"track_type": "master"
})
log_message(f"Master volume: {master_vol_response.get('status')}")
# ========================================
# VERIFICATION
# ========================================
log_message("\n" + "=" * 60)
log_message("VERIFICATION")
log_message("=" * 60)
# Get final session info
log_message("\n--- Final session info ---")
final_session = client.send("get_session_info")
result = final_session.get("result", {})
log_message(f"Tracks: {result.get('num_tracks')}, Returns: {result.get('num_return_tracks')}, Scenes: {result.get('num_scenes')}")
# Get final master devices
log_message("\n--- Final master devices ---")
final_master = client.send("get_devices", {
"track_type": "master",
"track_index": 0
})
devices_list = final_master.get("result", [])
if isinstance(devices_list, list):
for d in devices_list:
if isinstance(d, dict):
log_message(f" - {d.get('name', '?')}")
# Verify return tracks
log_message("\n--- Return tracks ---")
for i in range(3):
ret_info = client.send("get_track_info", {
"track_index": i,
"track_type": "return"
})
result = ret_info.get("result", {})
log_message(f" Return {i}: {result.get('name', '?')} - Volume: {result.get('volume', '?'):.2f}" if isinstance(result.get('volume'), (int, float)) else f" Return {i}: {result.get('name', '?')}")
log_message("\n" + "=" * 60)
log_message("SETUP COMPLETE")
log_message("=" * 60)
except Exception as e:
log_message(f"Error: {e}")
import traceback
log_message(traceback.format_exc())
if __name__ == "__main__":
main()

View File

@@ -1,27 +0,0 @@
@echo off
echo ============================================
echo AbletonMCP-AI Server
echo ============================================
echo.
echo Starting MCP server...
echo Connecting to Ableton at localhost:9877
echo.
echo Make sure that:
echo 1. Ableton Live 12 is open
echo 2. The 'AbletonMCP_AI' Control Surface is selected
echo in Preferences ^> Link/Tempo/MIDI
echo.
echo Press Ctrl+C to stop
echo ============================================
echo.
cd /d "%~dp0\MCP_Server"
python server.py
if errorlevel 1 (
echo.
echo ERROR: Could not start the server
echo Check that Python is installed and on the PATH
echo.
pause
)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.