From 5b804dbc8cad6a77ad50b26dca0cd0a083675e68 Mon Sep 17 00:00:00 2001 From: renato97 Date: Sun, 29 Mar 2026 12:57:49 -0300 Subject: [PATCH] chore: clean repo and ignore runtime artifacts --- .gitignore | 29 +- AbletonMCP_AI/MCP_Server/scan_log.txt | Bin 820 -> 0 bytes AbletonMCP_AI/diversity_memory.json | 26 - AbletonMCP_AI_BAK_20260328_200801/.gitignore | 46 - AbletonMCP_AI_BAK_20260328_200801/CLAUDE.md | 727 -- .../MCP_Server/ABLETUNES_TEMPLATE_NOTES.md | 39 - .../MCP_Server/SAMPLE_SYSTEM_README.md | 203 - .../MCP_Server/__init__.py | 26 - .../MCP_Server/agent11_harmony_review.py | 318 - .../MCP_Server/agent17_sample_loader.py | 192 - .../MCP_Server/agent7_vocals.py | 104 - .../MCP_Server/agent8_fx_transitions.py | 102 - .../MCP_Server/agent9_perc_loader.py | 184 - .../MCP_Server/audio_analyzer.py | 681 -- .../MCP_Server/audio_resampler.py | 2466 ------- .../MCP_Server/enhanced_device_automation.py | 431 -- .../MCP_Server/fx_group_loader.py | 170 - .../MCP_Server/reference_listener.py | 4834 ------------- .../MCP_Server/reference_stem_builder.py | 264 - .../MCP_Server/requirements.txt | 13 - .../MCP_Server/retrieval_benchmark.py | 525 -- .../MCP_Server/role_matcher.py | 469 -- .../MCP_Server/sample_index.py | 308 - .../MCP_Server/sample_manager.py | 1010 --- .../MCP_Server/sample_system_demo.py | 244 - .../MCP_Server/segment_rag_builder.py | 198 - .../MCP_Server/server_v2.py | 1366 ---- .../MCP_Server/socket_smoke_test.py | 798 --- .../MCP_Server/song_generator.py | 6268 ----------------- .../MCP_Server/template_analyzer.py | 177 - .../MCP_Server/vector_manager.py | 452 -- AbletonMCP_AI_BAK_20260328_200801/README.md | 222 - .../Remote_Script.py | 943 --- .../automation/glm_agents.example.json | 14 - .../automation/glm_agents.team.json | 30 - .../automation/invoke_codex_review.ps1 | 94 - .../reports/glm_task_001_benchmark_check.json | 401 -- .../automation/run_glm_codex_loop.ps1 | 157 - .../automation/run_glm_cycle.ps1 | 162 - .../automation/run_task_queue.ps1 | 
141 - .../automation/send_telegram_notification.ps1 | 33 - .../automation/wsl/ableton-glm-loop.service | 18 - .../automation/wsl/bootstrap_wsl_runtime.sh | 53 - .../automation/wsl/docker-compose.yml | 163 - .../wsl/initdb/01-init-multiple-dbs.sh | 18 - .../automation/wsl/install_service.sh | 12 - .../automation/wsl/run_glm_codex_loop.sh | 59 - .../automation/wsl/run_glm_cycle.sh | 92 - .../automation/wsl/run_task_queue.sh | 106 - .../automation/wsl/scripts/install.sh | 281 - .../automation/wsl/scripts/install_systemd.sh | 21 - .../automation/wsl/scripts/logs.sh | 39 - .../automation/wsl/scripts/restart.sh | 8 - .../automation/wsl/scripts/setup.sh | 140 - .../automation/wsl/scripts/start.sh | 143 - .../automation/wsl/scripts/status.sh | 58 - .../automation/wsl/scripts/stop.sh | 52 - .../automation/wsl/send_telegram.sh | 28 - .../load_samples.py | 107 - AbletonMCP_AI_BAK_20260328_200801/roadmap.md | 215 - .../setup_returns_master.py | 227 - .../start_server.bat | 27 - Axiom_25_Classic/Preset.syx | Bin 1326 -> 0 bytes Axiom_49_61_Classic/Preset.syx | Bin 1731 -> 0 bytes BCF2000/Preset.syx | Bin 9385 -> 0 bytes BCR2000/Preset.syx | Bin 10673 -> 0 bytes KONTROL49/Preset.syx | Bin 780 -> 0 bytes MPD32/Preset.syx | Bin 296309 -> 0 bytes MPK25/Preset.syx | Bin 682 -> 0 bytes MPK49/Preset.syx | Bin 309310 -> 0 bytes MPK61/Preset.syx | Bin 1039 -> 0 bytes MPK88/Preset.syx | Bin 1039 -> 0 bytes Push/Preset.syx | Bin 121030 -> 0 bytes Push/Setup.syx | Bin 8 -> 0 bytes .../firmware/app_push2_stable_1.0.71.upgrade | Bin 62720 -> 0 bytes Roland_A_PRO/Preset.syx | Bin 6345 -> 0 bytes microKONTROL/Preset.syx | Bin 103 -> 0 bytes 77 files changed, 28 insertions(+), 26706 deletions(-) delete mode 100644 AbletonMCP_AI/MCP_Server/scan_log.txt delete mode 100644 AbletonMCP_AI/diversity_memory.json delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/.gitignore delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/CLAUDE.md delete mode 100644 
AbletonMCP_AI_BAK_20260328_200801/MCP_Server/ABLETUNES_TEMPLATE_NOTES.md delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/SAMPLE_SYSTEM_README.md delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/__init__.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent11_harmony_review.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent17_sample_loader.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent7_vocals.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent8_fx_transitions.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent9_perc_loader.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_analyzer.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_resampler.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/enhanced_device_automation.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/fx_group_loader.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_listener.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_stem_builder.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/requirements.txt delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/retrieval_benchmark.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/role_matcher.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_index.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_manager.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_system_demo.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/segment_rag_builder.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/server_v2.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/socket_smoke_test.py delete mode 100644 
AbletonMCP_AI_BAK_20260328_200801/MCP_Server/song_generator.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/template_analyzer.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/MCP_Server/vector_manager.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/README.md delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/Remote_Script.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.example.json delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.team.json delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/invoke_codex_review.ps1 delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/reports/glm_task_001_benchmark_check.json delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_codex_loop.ps1 delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_cycle.ps1 delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/run_task_queue.ps1 delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/send_telegram_notification.ps1 delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/ableton-glm-loop.service delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/bootstrap_wsl_runtime.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/docker-compose.yml delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/initdb/01-init-multiple-dbs.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/install_service.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_codex_loop.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_cycle.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_task_queue.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install_systemd.sh delete mode 100644 
AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/logs.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/restart.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/setup.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/start.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/status.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/stop.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/automation/wsl/send_telegram.sh delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/load_samples.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/roadmap.md delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/setup_returns_master.py delete mode 100644 AbletonMCP_AI_BAK_20260328_200801/start_server.bat delete mode 100644 Axiom_25_Classic/Preset.syx delete mode 100644 Axiom_49_61_Classic/Preset.syx delete mode 100644 BCF2000/Preset.syx delete mode 100644 BCR2000/Preset.syx delete mode 100644 KONTROL49/Preset.syx delete mode 100644 MPD32/Preset.syx delete mode 100644 MPK25/Preset.syx delete mode 100644 MPK49/Preset.syx delete mode 100644 MPK61/Preset.syx delete mode 100644 MPK88/Preset.syx delete mode 100644 Push/Preset.syx delete mode 100644 Push/Setup.syx delete mode 100644 Push2/firmware/app_push2_stable_1.0.71.upgrade delete mode 100644 Roland_A_PRO/Preset.syx delete mode 100644 microKONTROL/Preset.syx diff --git a/.gitignore b/.gitignore index 7010820..4147e43 100644 --- a/.gitignore +++ b/.gitignore @@ -95,4 +95,31 @@ nul *.sample_embeddings.json # AbletonMCP_AI generated audio -AppData/ \ No newline at end of file +AppData/ + +# Local backups and archives +AbletonMCP_AI_BAK_*/ +_archive/ + +# Ableton bundled controller content kept only on disk +Axiom_25_Classic/ +Axiom_49_61_Classic/ +BCF2000/ +BCR2000/ +KONTROL49/ +MPD32/ +MPK25/ +MPK49/ +MPK61/ +MPK88/ +Push/ +Push2/ +Roland_A_PRO/ +microKONTROL/ + +# AbletonMCP_AI 
runtime state +AbletonMCP_AI/diversity_memory.json +AbletonMCP_AI/MCP_Server/scan_log.txt +AbletonMCP_AI/MCP_Server/*.log +AbletonMCP_AI/MCP_Server/health_check_result.json +*.bak diff --git a/AbletonMCP_AI/MCP_Server/scan_log.txt b/AbletonMCP_AI/MCP_Server/scan_log.txt deleted file mode 100644 index b5a93143d451a4c8ceca742eb6e9b3c90d5e79e9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 820 zcmb`F%Syvg5QhJ?;5&qFY|(;liwII$a3gA$ULZ}<8cZ+bq*4)mboHBwp{=+PB&0oO z&dh(y`TSVwMU@`4&|Hyvy!H&0t5C+OrBekuN-A?rbw=fgdf|KPIMK28wWo8sHuWC$ zND0U>v$nIILu$K>U3!eAIeph}kDb=aBFE^1TA^=>?h*$VXq&KZdba<~(MsR2F z<@DyPQuo{TslPhzfwoDd1IDfHJ((MjXU-S7t6EE@f!1(mXsBSfynnhHYDKOEey&-? zo^hjefX|Yt60a$2wJS>@*E6Vn&SpF@xG7vscg?ZQwB*dbWu-Pb9O%k%O<7<%1t&QA ztABT62zYD3X_E{05U5#xcX)mSqqE~>JN^G!KRCmPY)pAfmK%?j#YA1#+uYL#eQQ^2 qzQT9n=E3fx-PFXhn&ZfJmPkV4D_q&Fj(NxYCVFDJU=_H_mg5)OhkDfj diff --git a/AbletonMCP_AI/diversity_memory.json b/AbletonMCP_AI/diversity_memory.json deleted file mode 100644 index 5ffe5b1..0000000 --- a/AbletonMCP_AI/diversity_memory.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "used_families": { - "acoustic": 8 - }, - "used_paths": { - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Dubdogz & Jude & Frank - ININNA TORA (Extended Version) [@danielcarmona_dj].mp3": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Clap 5.wav": 2, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 9.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 3.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Kick Hit 03.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & 
Snare Hit 12.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Hat Hit 07.wav": 2, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 5.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Kick Hit 10.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 14.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Kit_01_OHH_A#_125.wav": 3, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Kick Hit 02.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Clap 8.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Closed Hat 6.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 4.wav": 1, - "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 10.wav": 1 - }, - "generation_count": 7, - "last_updated": "2026-03-29T01:28:32.412286", - "version": "1.0" -} \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/.gitignore b/AbletonMCP_AI_BAK_20260328_200801/.gitignore deleted file mode 100644 index adc2402..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/.gitignore +++ /dev/null @@ -1,46 +0,0 @@ -__pycache__/ -*.py[cod] -*.pyo -.pytest_cache/ -.mypy_cache/ -.ruff_cache/ -.venv/ -venv/ -.idea/ -.vscode/ -*.log -*.tmp -*.bak -*.asd -*.als -*.wav -*.aif -*.aiff -*.flac -*.ogg -*.mp3 -exports/ -render/ -renders/ -stems/ -temp/ -tmp/ -/automation/telegram.local.json 
-/automation/wsl_runtime/ -/automation/wsl.local.env -/automation/wsl/.env -/automation/runs/ -/automation/.task_queue.tmp* - -# Temp and debug files -*_errors*.txt -*.patch -fix.py -update_opencode.py -grant_permissions.py -GLM_TASK_*_REPORT.md -glmwork.md - -# Library paths (user-specific) -librerias/ -sample/ diff --git a/AbletonMCP_AI_BAK_20260328_200801/CLAUDE.md b/AbletonMCP_AI_BAK_20260328_200801/CLAUDE.md deleted file mode 100644 index 02fc672..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/CLAUDE.md +++ /dev/null @@ -1,727 +0,0 @@ -# AbletonMCP-AI Full Handoff - -This file is the broadest handoff in the repo. - -If another AI needs to retake the project with minimal context loss, this is the file to read first. - -Project root: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI` - -Current local branch: -- `main` - -Last pushed commit at the moment this file was updated: -- `2a0d2f3dbf5f89b18690fee2a2659957f81b8191` - -## Read Order - -Read in this order: - -1. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\CLAUDE.md` -2. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\README.md` -3. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\AI_HANDOFF.md` -4. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\ARCHITECTURE.md` -5. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\MCP_TOOLS.md` -6. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\REMOTE_PROTOCOL.md` -7. `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\PROJECT_CONTEXT.md` -8. 
`C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\GPU_DIRECTML.md` - -Useful secondary docs: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\ABLETUNES_TEMPLATE_NOTES.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\SAMPLE_SYSTEM_README.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ROADMAP_MASTER_GLM.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\README.md` - -## What This Project Is - -AbletonMCP-AI is a hybrid system to control Ableton Live 12 from MCP, generate long editable arrangements, analyze reference tracks, retrieve similar material from a local sample library, and build original projects that feel closer to a real producer workflow than to a loop toy. - -The desired output is: - -- always Arrangement View -- editable tracks and clips -- many specialized roles -- buses and returns -- original output, not stems from the reference track - -## Core Design Rules That Must Not Be Lost - -- Arrangement-first is mandatory. -- Reference audio is for analysis, not plagiarism. -- Do not use stems from the target song in the final output. -- The system must keep working even if the M4L path is incomplete. -- Stable fallback is better than a flashy broken feature. -- The local sample library is the primary sound source. -- Validation and diagnosis matter because the stack is large and brittle. 
- -## Important Paths - -Main runtime: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\__init__.py` - -MCP server: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\server.py` - -Music generator: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\song_generator.py` - -Reference analysis: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\reference_listener.py` - -Sample selection: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\sample_selector.py` - -Audio resampling: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\audio_resampler.py` - -Socket smoke test: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\socket_smoke_test.py` - -Segment RAG builder: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\segment_rag_builder.py` - -Local library: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks` - -Reference folder: -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\sample` - -Ableton log: -- `C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\Log.txt` - -Recovery popup file: -- `C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\CrashRecoveryInfo.cfg` - -User library: -- `C:\Users\ren\Documents\Ableton\User Library` - -## External Assets And References Used During Development - -Reference tracks that shaped the direction: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\sample\Eli Brown x GeezLy - Me Gusta.mp3` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\sample\Mr. 
Pauer, Goyo - Quimica (Video Oficial).mp3` - -Producer template reference pack: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\Abletunes_Free_Templates_Pack` - -The reference tracks were used for: - -- BPM and key estimation -- section and energy direction -- sample retrieval guidance -- stylistic remake goals - -They were not supposed to be used as final stems. - -## The Story From The Beginning Until Now - -### Phase 0: Initial Goal - -The original goal was not only to make sounds in Ableton, but to give the system the ability to: - -- receive a prompt or a reference MP3 -- understand the style and structure -- choose similar sounds from the local library -- generate a long arrangement that feels professionally produced -- keep the result editable inside Ableton - -Very early it became clear that a plain Session View loop machine was not enough. - -### Phase 1: Make The Remote Script Actually Work - -The first major work was stabilizing the remote layer between MCP and Ableton. - -Main problems solved in that phase: - -- command naming mismatches between MCP and the Remote Script -- parameter normalization like `track_index`, `clip_index`, `scene_index` -- note writing API mismatch in Live -- socket protocol mismatch -- stale or broken sample manager initialization - -This phase made the project usable enough to: - -- create tracks -- create clips -- write MIDI -- query session state -- build the first generated projects - -### Phase 2: Arrangement-First Pivot - -At that point the output still behaved too much like Session clips and loops. The user explicitly wanted to see everything in Arrangement View. - -That created the second major architectural pivot: - -1. generate blueprint in Session -2. commit Session to Arrangement in ordered scene playback -3. place audio fallback and overlays in Arrangement - -This became the stable route. 
- -Important lesson: - -- precreating certain audio tracks before the Session to Arrangement commit produced silent or broken sets -- the stable route is still: Session blueprint first, Arrangement commit second, audio layers after that - -### Phase 3: Richer Project Shape - -Once Arrangement-first worked, the next problem was musical complexity. The output felt like repeated loops. - -The generator was expanded with: - -- more track roles -- section-aware pattern generation -- richer drums, bass, melodic and FX layers -- scene naming and locators -- guide tracks -- more realistic arrangement structures - -Important roles that became standard: - -- kick -- clap -- snare fill -- hat closed -- hat open -- top loop -- percussion -- ride -- tom fill -- sub bass -- bass -- drone -- chords -- pluck -- vocal chop -- stab -- pad -- arp -- lead -- counter -- crash -- reverse FX -- riser FX -- impact FX -- atmos -- plus many `AUDIO ...` tracks - -### Phase 4: Local Library As Primary Source - -The next major issue was weak selection quality. At one point the code was pointed to a small mirror path and not to the real large library. - -The real usable library was: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks` - -That directory had hundreds of real audio assets, while the old fallback mirror only had a few dozen. - -Fixing that changed the quality of: - -- drum matching -- bass loop matching -- vocal loop matching -- FX and atmos selection - -This was a major turning point because the generated output stopped sounding starved. - -### Phase 5: Reference-Led Reconstruction - -After the library path was fixed, the project started using references more seriously. 
- -The target workflow became: - -- analyze the reference -- infer BPM, key, energy, style and section behavior -- find similar sounds in the local library -- reconstruct a new original track - -There was a temporary detour where stems of a reference song were materialized for analysis, but that was explicitly rejected because the desired product is not a stem-based copy workflow. - -The rule became strict: - -- no final stems from the reference -- only original reconstruction using local assets and generated MIDI - -### Phase 6: Buses, Returns, Master, And Production Logic - -The project then moved from "long loop" to "production-shaped session". - -Major additions: - -- real bus tracks -- return tracks -- track routing into buses -- role-based mixing -- return snapshots by section -- master chain blueprint - -Typical bus layout now: - -- drums -- bass -- music -- vocal -- FX - -Typical return layout now: - -- space -- echo -- heat -- glue - -This phase made the result feel more like a produced project and less like independent loops. - -### Phase 7: Template Analysis - -The project analyzed professional Abletunes templates to absorb real producer patterns. 
- -Main conclusions: - -- professional templates are Arrangement-heavy -- there are many specialized layers, not just one drum loop and one bass loop -- sidechain triggers, transitions, buses, returns and printed audio are common -- arrangement blocks are often 16, 32 or 64 bars -- heavy automation is normal - -This analysis informed later changes in: - -- section shape -- layer count -- transition behavior -- bus logic - -### Phase 8: Audio Fallback And Hybrid Output - -The system learned to combine: - -- MIDI and stock instruments -- local audio loops and hits -- special `AUDIO ...` overlays - -This was critical because: - -- sometimes stock-device generation gives editability and musical logic -- sometimes local audio assets give the genre-specific realism that MIDI alone cannot deliver - -So the stack became hybrid by design, not by accident. - -### Phase 9: Audio Resampling And Derived FX - -Then came the derived transition layer. - -The system added support for: - -- reverse FX -- riser -- downlifter -- stutter - -This area improved through: - -- `audio_resampler.py` -- reference-driven placement -- later bugfixes for short clips and defensive rendering - -One real bug that had to be solved: - -- `AUDIO RESAMPLE STUTTER` failed until the resampler and short-clip FFT handling were hardened - -That was fixed later and validated in real Live runs. - -### Phase 10: GPU And Deeper Reference Listening - -The user specifically asked to use GPU if possible. - -The system moved toward: - -- `DirectML` -- more expensive analysis of the reference -- segmented analysis at multiple window sizes -- heavier similarity scoring - -This became one of the most important changes for retrieval quality. - -The reference listener started doing: - -- segmenting the reference into short windows -- scoring by role -- deeper reranking -- using local metadata and cache - -This was the start of an audio-retrieval-style workflow rather than simple filename matching. 
- -### Phase 11: Segment RAG Direction - -To go further, the project started building a segment-level retrieval cache for the local library. - -That work added: - -- persistent per-segment cache files -- a segment builder CLI -- partial index expansion over the real library - -This is not a text RAG in the usual sense. It is closer to: - -- segmented audio retrieval -- coarse search plus rerank -- role-aware filtering -- diversity constraints - -This remains one of the most promising long-term directions for better remake quality. - -### Phase 12: GLM Workflow - -At some point token efficiency became a concern, so GLM-5 was introduced as a worker model. - -The workflow that proved useful was: - -1. Codex writes a narrow `.md` task -2. GLM edits only 1 to 3 related files -3. Codex reviews the diff -4. Codex corrects technical mistakes and validates the runtime - -What GLM was good at: - -- narrow feature implementation -- heuristic expansion -- helper tools -- reports and manifests - -What GLM was bad at: - -- declaring things complete too early -- runtime-sensitive work without supervision -- architectural judgment -- avoiding diff inflation - -Practical verdict: - -- useful as a worker -- not reliable enough as the sole closer of large features - -### Phase 13: Temporary WSL / n8n / Local Gitea Automation Detour - -There was a detour into WSL orchestration, n8n, local Gitea and Telegram loops. 
- -Scaffolding was generated for: - -- WSL deployment -- Docker Compose -- n8n flows -- Telegram notifications -- GLM to Codex automation loops - -That stack produced a lot of files under: - -- `automation\` -- `automation\wsl\` - -But the key lesson was: - -- the deployment summary was overstated -- the generated stack was not a truly finished deployment -- the main product value still lives in the music system, not in orchestration - -Because of that, the project intentionally pivoted back to the flow that worked: - -- Codex writes focused `.md` -- GLM does bounded work -- Codex reviews and fixes - -### Phase 14: Retrieval Hardening, Pattern Banks, Transition Materialization - -After the orchestration detour, the roadmap was re-centered on the actual product. - -Task batches improved: - -- role-safe retrieval -- repetition penalties -- more section pattern banks -- transition event materialization - -This reduced some of the "same loop again" feeling, but did not solve everything. - -### Phase 15: Device Automation Snapshots - -The latest work pushed into: - -- track device parameter snapshots -- return device parameter snapshots -- master section snapshots -- automation summaries - -GLM implemented part of that, but the raw result was not correct. 
- -The real fixes applied afterward were: - -- use the already existing `SECTION_DEVICE_AUTOMATION` instead of duplicate tables -- use the already existing `MASTER_DEVICE_AUTOMATION` -- flatten `device_parameters` into the format the server actually applies -- make the server accept both flat and nested payloads defensively -- add `mix_automation_summary` -- add `mix_automation_warnings` - -This work lives mainly in: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\song_generator.py` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\server.py` - -### Phase 16: Fix for "Only Piano" Issue (Audio Samples Not Loading) - -The user reported: "lo que me generaste solo tiene algunos pianos, nada de sonidos de mi biblioteca!" - the generated tracks only had piano/MIDI sounds, no actual audio samples from the local library. - -Root causes found and fixed: - -1. **`ROLE_PATTERNS` used non-recursive globs** in `reference_listener.py` lines 1228-1254: - - Patterns like `'*Kick*.wav'` couldn't match files in subdirectories like `loops/kick/` - - Fixed by changing to recursive patterns: `'**/*Kick*.wav'` - -2. 
**Method resolution bug** in `reference_listener.py`: - - `_get_role_section_features` and `_section_character_bonus` are methods in `SectionDetector` class - - Were being called as `self._method` from `ReferenceAudioListener` instead of `self._section_detector._method` - - Fixed at lines 3247 and 3270-3272 - -After fixes, verification showed: -- Reference audio plan builds correctly with 30+ layers -- `_materialize_reference_audio_layers()` creates actual audio tracks -- Real samples from local library are used (e.g., "mt kick hit 10.wav", "bbh - primer impacto - bass loop 03 g#m.wav") -- 34 audio tracks created (8 derived + 26 base), 0 errors - -## What Currently Works - -The system can currently: - -- generate full projects in Arrangement View -- build MIDI plus stock-device layers -- build many `AUDIO ...` layers from the local library -- analyze a reference track -- build a retrieval-guided original result -- commit scenes to Arrangement -- create buses and returns -- route tracks into buses -- apply track, return and master snapshots -- diagnose the generated set -- validate the generated set -- use DirectML for deeper matching work - -## What Is Stable - -The most stable route today is: - -1. analyze reference if one is available -2. build config in `song_generator.py` -3. materialize Session blueprint through the runtime -4. commit Session to Arrangement -5. place audio layers in Arrangement -6. validate and diagnose - -Do not casually change this order. 
- -## What Is Still Weak - -The project still has real weaknesses: - -- some generations still feel too loop-based -- retrieval can still pick poor family matches -- remake quality is not yet close enough to a convincing stylistic remake -- some runs still overuse familiar sound families -- loudness and gain staging can vary too much between runs -- derived resample layers are not always present in the final set -- the MCP wrapper for `generate_track` can time out even when Live keeps working - -## Known Operational Problems - -### 1. Recovery popup can block everything - -If Live crashes or thinks recovery is needed, a modal popup can block the socket. - -The file involved is: - -- `C:\Users\ren\AppData\Roaming\Ableton\Live 12.0.15\Preferences\CrashRecoveryInfo.cfg` - -Practical fix used during development: - -- kill Ableton -- blank that file -- restart Ableton - -### 2. MCP wrapper timeout - -The MCP tool wrapper around `generate_track` often times out at about 120 seconds. - -Important: - -- timeout does not always mean generation failed -- often the set keeps building inside Live - -Safer checks after a timeout: - -- `get_session_info()` -- `get_tracks()` -- `validate_set()` -- `diagnose_generated_set()` - -### 3. Runtime state can drift from stored manifest - -At least once, `get_generation_manifest()` returned stale data from an older generation while the actual current set in Live was already different. - -Trust runtime state first: - -- session info -- track list -- diagnosis -- validation - -Manifest is useful, but not always the freshest source. - -### 4. GLM reports often exaggerate completion - -Never trust a GLM report by itself. 
-Always compare: - -- report -- actual diff -- runtime result - -## Current Validation Habit - -Minimum technical checks after code edits: - -```powershell -python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\audio_resampler.py" -python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\reference_listener.py" -python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\sample_selector.py" -python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\server.py" -python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\song_generator.py" -python -m py_compile "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\__init__.py" -``` - -Minimum Live checks: - -- `get_session_info()` -- `get_tracks()` -- `validate_set(check_clips=True, check_gain=True, check_routing=True)` -- `diagnose_generated_set()` - -Useful direct smoke test: - -```powershell -cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server" -python socket_smoke_test.py -``` - -## Current Roadmap Priority - -The active roadmap is: - -1. better library retrieval -2. more real section variation -3. better transition and derived FX placement -4. better device automation -5. gain staging and loudness consistency -6. better remake ability -7. stronger QA and export - -The roadmap source file is: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ROADMAP_MASTER_GLM.md` - -## Current GLM Workflow - -Keep GLM on short, bounded tasks only. 
- -Recent task files: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_001_RETRIEVAL_ROLE_PENALTIES.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_002_SECTION_PATTERN_BANK_EXPANSION.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_003_TRANSITION_EVENT_MATERIALIZATION.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_NEXT_TASK_004_DEVICE_AUTOMATION_SNAPSHOTS.md` - -Corresponding reports: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_001_REPORT.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_002_REPORT.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_003_REPORT.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\GLM_TASK_004_REPORT.md` - -Recommended pattern: - -1. Codex writes one narrow task md -2. GLM edits only 1 to 3 files -3. Codex reviews diff -4. Codex corrects technical and runtime mistakes -5. 
only then decide whether the task is really done - -## Current Documentation Map - -Core repo docs: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\README.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\CLAUDE.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\AI_HANDOFF.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\ARCHITECTURE.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\MCP_TOOLS.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\REMOTE_PROTOCOL.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\PROJECT_CONTEXT.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\docs\GPU_DIRECTML.md` - -Generator and retrieval docs: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\ABLETUNES_TEMPLATE_NOTES.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server\SAMPLE_SYSTEM_README.md` - -Roadmaps and backlog: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\roadmap.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\roadmap2.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ROADMAP_MASTER_GLM.md` - -Automation detour docs: - -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\README.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\MASTER_AUTONOMOUS_ROADMAP.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\automation\wsl\README.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote 
Scripts\AbletonMCP_AI\automation\wsl\DEPLOYMENT_SUMMARY.md` -- `C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\WSL_STACK.md` - -## If You Need To Resume Development Safely - -Start here: - -1. read this file -2. read the current roadmap -3. inspect local git status -4. compile changed Python files -5. restart Live if `__init__.py` changed -6. clear recovery popup if needed -7. validate with `get_session_info`, `get_tracks`, `validate_set`, `diagnose_generated_set` -8. only then touch generation logic - -## Current Honest Product Status - -This project is no longer a basic prototype. - -It is already an advanced Ableton generation system that can: - -- create long arrangements -- use references intelligently -- retrieve from a large local library -- mix MIDI and audio material -- build buses, returns and snapshots -- generate editable results in Arrangement View - -But it is still not finished. - -The main gap is no longer raw plumbing. The main gap is artistic fidelity: - -- better retrieval -- better variation -- better remake quality -- more consistent mix quality - -That is the real work that remains. diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/ABLETUNES_TEMPLATE_NOTES.md b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/ABLETUNES_TEMPLATE_NOTES.md deleted file mode 100644 index ff7dcf3..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/ABLETUNES_TEMPLATE_NOTES.md +++ /dev/null @@ -1,39 +0,0 @@ -# Abletunes Template Notes - -Estos templates muestran patrones claros de produccion real que conviene copiar en el generador. - -## Patrones fuertes - -- Son `arrangement-first`, no `session-first`. En los cuatro sets los clips viven casi enteros en Arrangement y las scenes estan vacias o sin rol productivo. -- Todos usan locators para secciones (`Intro`, `Breakdown`, `Drop`, `Break`, `Outro`, `End`) y esas secciones casi siempre caen en bloques de `16`, `32`, `64`, `96` o `128` beats. 
-- Siempre hay jerarquia por grupos: drums/top drums, bass, instruments, vox, fx. -- Casi siempre existe un `SC Trigger` o pista equivalente dedicada al sidechain. -- Los drums no son una sola pista. Hay capas separadas para kick, clap, snare, hats, ride, perc, fills, crashes, risers y FX. -- Las partes armonicas tampoco son una sola pista. Aparecen capas distintas para bassline, reese/sub, chord, piano, string, pluck, lead y layers. -- Mezclan MIDI e audio de forma agresiva. Un productor no se queda solo con MIDI: imprime loops, resamples, freeze y audios procesados cuando hace falta. -- Hay bastante tratamiento por pista: `Eq8`, `Compressor2`, `Reverb`, `AutoFilter`, `PingPongDelay`, `GlueCompressor`, `MultibandDynamics`, `Limiter`, `Saturator`. - -## Lo que mas importa para el MCP - -- El generador no tiene que crear "un loop largo". Tiene que crear secciones con mutaciones claras entre una y otra. -- Cada seccion necesita variacion de densidad, no solo mute/unmute basico. Los templates meten fills, crashes, reverse FX, chants, top loops y capas extra solo en puntos de tension. -- El arreglo profesional usa mas pistas especializadas de las que hoy genera el MCP. La separacion por rol es parte del sonido. -- Hay que imprimir mas audio original derivado del propio proyecto: resamples, reverses, freezes y FX hechos a partir de material propio. -- Los returns son pocos pero concretos. No hace falta llenar de sends; hace falta `reverb`, `delay` y buses de grupo bien usados. - -## Señales concretas vistas en el pack - -- `Abletunes - Dope As F_ck`: `128 BPM`, 6 grupos, 2 returns, `Sylenth1` dominante, mucha automatizacion (`8121` eventos). -- `Abletunes - Freedom`: `126 BPM`, mezcla house mas simple, bateria muy separada, menos automatizacion, mucho `OriginalSimpler` + `Serum`. -- `Abletunes - Hideout`: set largo y cargado, `Massive` + `Sylenth1`, una bateria enorme y mucha automatizacion (`6470` eventos). 
-- `Abletunes - Nobody's Watching`: enfoque mas stock, usa `Operator`, `Simpler`, bastante audio vocal y FX impresos. - -## Reglas que deberiamos incorporar - -- Generar por defecto en Arrangement, con locators reales y secciones de 16/32 bars. -- Añadir `SC Trigger`, grupos y returns fijos desde el blueprint. -- Separar drums en mas roles: kick, clap main, clap layer, snare fill, hats, ride, perc main, perc FX, crash, reverse, riser. -- Separar armonia y hooks: sub, bassline, chord stab, piano/keys, string/pad, pluck, lead, accent synth. -- Crear eventos de transicion por seccion: uplifter, downlifter, reverse crash, vocal chop, tom fill. -- Imprimir audio derivado del material generado cuando una capa necesite mas impacto o textura. -- Meter automatizacion por seccion en filtros, sends, volumen de grupos y FX de transicion. diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/SAMPLE_SYSTEM_README.md b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/SAMPLE_SYSTEM_README.md deleted file mode 100644 index 9d6835c..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/SAMPLE_SYSTEM_README.md +++ /dev/null @@ -1,203 +0,0 @@ -# Sistema de Gestión de Samples - AbletonMCP-AI - -Sistema completo de indexación, clasificación y selección inteligente de samples musicales. - -## Componentes - -### 1. `audio_analyzer.py` - Análisis de Audio - -Detecta automáticamente características de archivos de audio: -- **BPM**: Detección de tempo mediante análisis de onset -- **Key**: Detección de tonalidad mediante cromagrama -- **Tipo**: Clasificación en kick, snare, bass, synth, etc. 
-- **Características espectrales**: Centroide, rolloff, RMS - -**Uso básico:** -```python -from audio_analyzer import analyze_sample - -result = analyze_sample("path/to/sample.wav") -print(f"BPM: {result['bpm']}, Key: {result['key']}") -print(f"Tipo: {result['sample_type']}") -``` - -**Backends:** -- `librosa`: Análisis completo (requiere instalación) -- `basic`: Análisis por nombre de archivo (sin dependencias) - -### 2. `sample_manager.py` - Gestión de Librería - -Gestor completo de la librería de samples: -- Indexación recursiva de directorios -- Clasificación automática por categorías -- Metadatos extensibles (tags, rating, géneros) -- Búsqueda avanzada con múltiples filtros -- Persistencia en JSON - -**Categorías principales:** -- `drums`: kick, snare, clap, hat, perc, shaker, tom, cymbal -- `bass`: sub, bassline, acid -- `synths`: lead, pad, pluck, chord, fx -- `vocals`: vocal, speech, chant -- `loops`: drum_loop, bass_loop, synth_loop, full_loop -- `one_shots`: hit, noise - -**Uso básico:** -```python -from sample_manager import SampleManager - -# Inicializar -manager = SampleManager(r"C:\Users\ren\embeddings\all_tracks") - -# Escanear -stats = manager.scan_directory(analyze_audio=True) - -# Buscar -kicks = manager.search(sample_type="kick", key="Am", bpm=128) -house_samples = manager.search(genres=["house"], limit=10) - -# Obtener pack completo -pack = manager.get_pack_for_genre("techno", key="F#m", bpm=130) -``` - -### 3. 
`sample_selector.py` - Selección Inteligente - -Selección contextual basada en género, key y BPM: -- Perfiles de género predefinidos -- Matching armónico entre samples -- Generación de kits de batería coherentes -- Mapeo MIDI automático - -**Géneros soportados:** -- Techno (industrial, minimal, acid) -- House (deep, classic, progressive) -- Tech-House -- Trance (progressive, psy) -- Drum & Bass (liquid, neuro) -- Ambient - -**Uso básico:** -```python -from sample_selector import SampleSelector - -selector = SampleSelector() - -# Seleccionar para un género -group = selector.select_for_genre("techno", key="F#m", bpm=130) - -# Acceder a elementos -group.drums.kick # Sample de kick -group.bass # Lista de bass samples -group.synths # Lista de synths - -# Mapeo MIDI -mapping = selector.get_midi_mapping_for_kit(group.drums) - -# Cambio de key armónico -new_key = selector.suggest_key_change("Am", "fifth_up") # Em -``` - -## Integración con MCP Server - -El servidor MCP expone las siguientes herramientas: - -### Gestión de Librería -- `scan_sample_library` - Escanear directorio de samples -- `get_sample_library_stats` - Estadísticas de la librería - -### Búsqueda y Selección -- `advanced_search_samples` - Búsqueda con filtros múltiples -- `select_samples_for_genre` - Selección automática por género -- `get_drum_kit_mapping` - Kit de batería con mapeo MIDI -- `get_sample_pack_for_project` - Pack completo para proyecto - -### Análisis y Compatibilidad -- `analyze_audio_file` - Analizar archivo de audio -- `find_compatible_samples` - Encontrar samples compatibles -- `suggest_key_change` - Sugerir cambios de tonalidad - -## Estructura de Datos - -### Sample -```python -@dataclass -class Sample: - id: str # ID único - name: str # Nombre del archivo - path: str # Ruta completa - category: str # Categoría principal - subcategory: str # Subcategoría - sample_type: str # Tipo específico - key: Optional[str] # Tonalidad (Am, F#m, C) - bpm: Optional[float] # BPM - duration: float # 
Duración en segundos - genres: List[str] # Géneros asociados - tags: List[str] # Tags - rating: int # Rating 0-5 -``` - -### DrumKit -```python -@dataclass -class DrumKit: - name: str - kick: Optional[Sample] - snare: Optional[Sample] - clap: Optional[Sample] - hat_closed: Optional[Sample] - hat_open: Optional[Sample] - perc1: Optional[Sample] - perc2: Optional[Sample] -``` - -## Mapeo MIDI - -Notas estándar para drums: -- `36` (C1): Kick -- `38` (D1): Snare -- `39` (D#1): Clap -- `42` (F#1): Closed Hat -- `46` (A#1): Open Hat -- `41` (F1): Tom Low -- `49` (C#2): Crash - -## Ejemplos de Uso - -### Crear un track completo -```python -# Seleccionar samples para techno -selector = get_selector() -group = selector.select_for_genre("techno", key="F#m", bpm=130) - -# Usar con Ableton -ableton = get_ableton_connection() - -# Crear tracks y cargar samples -for i, sample in enumerate([group.drums.kick, group.drums.snare]): - if sample: - print(f"Cargar {sample.name} en track {i}") -``` - -### Buscar samples compatibles -```python -# Encontrar samples que combinen con un kick -kick = manager.get_by_path("path/to/kick.wav") -compatible = selector.find_compatible_samples(kick, max_results=5) - -for sample, score in compatible: - print(f"{sample.name}: {score:.1%} compatible") -``` - -## Archivos Generados - -- `.sample_cache/sample_library.json` - Índice de la librería -- `.sample_cache/library_stats.json` - Estadísticas - -## Dependencias Opcionales - -Para análisis de audio completo: -```bash -pip install librosa soundfile numpy -``` - -Sin estas dependencias, el sistema funciona en modo "basic" usando metadatos de los nombres de archivo. 
diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/__init__.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/__init__.py deleted file mode 100644 index aef464d..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -MCP Server para AbletonMCP-AI -Servidor FastMCP que conecta Claude con Ableton Live 12 -""" - -from .server import mcp, main -from .song_generator import SongGenerator -from .sample_index import SampleIndex - -# Nuevo sistema de samples -try: - SAMPLE_SYSTEM_AVAILABLE = True -except ImportError: - SAMPLE_SYSTEM_AVAILABLE = False - -__all__ = [ - 'mcp', 'main', - 'SongGenerator', 'SampleIndex', -] - -if SAMPLE_SYSTEM_AVAILABLE: - __all__.extend([ - 'SampleManager', 'Sample', 'get_manager', - 'SampleSelector', 'get_selector', 'DrumKit', 'InstrumentGroup', - 'AudioAnalyzer', 'analyze_sample', 'SampleType', - ]) diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent11_harmony_review.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent11_harmony_review.py deleted file mode 100644 index 470f44a..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent11_harmony_review.py +++ /dev/null @@ -1,318 +0,0 @@ -import json -import socket -from datetime import datetime -import os - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent11_review_harmony.txt" - -CHORD_TONES = { - "Am": [57, 60, 64], - "F": [53, 57, 60], - "C": [48, 52, 55], - "G": [43, 47, 50] -} - -CHORD_NAMES = { - "Am": ["A", "C", "E"], - "F": ["F", "A", "C"], - "C": ["C", "E", "G"], - "G": ["G", "B", "D"] -} - -AM_SCALE = [57, 59, 60, 62, 64, 65, 67] - -PROGRESSION_ORDER = ["Am", "F", "C", "G"] -CHORD_DURATION = 8.0 - -def pitch_to_name(pitch): - names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"] - return names[pitch % 12] - -def get_chord_at_time(start_time): - chord_index = int(start_time // CHORD_DURATION) % 4 - return PROGRESSION_ORDER[chord_index] - -def normalize_to_octave(pitch, 
target_octave=3): - return (pitch % 12) + (target_octave * 12) - -class AbletonSocketClient: - def __init__(self, host="127.0.0.1", port=9877, timeout=15.0): - self.host = host - self.port = port - self.timeout = timeout - - def send(self, command_type, params=None): - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - try: - line = reader.readline() - finally: - reader.close() - try: - sock.shutdown(socket.SHUT_RDWR) - except OSError: - pass - - if not line: - raise RuntimeError(f"No response for command: {command_type}") - - return json.loads(line) - -def log_message(msg): - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - log_line = f"[{timestamp}] {msg}\n" - print(log_line.strip()) - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(log_line) - -def analyze_track_harmony(client, track_index, track_name, scene_index=0): - issues = [] - notes_in_key = 0 - notes_out_of_key = 0 - chord_matches = 0 - chord_mismatches = 0 - - try: - response = client.send("get_notes", { - "track_index": track_index, - "scene_index": scene_index - }) - - if response.get("status") != "success": - return {"error": response.get("message", "Unknown error")} - - notes = response.get("result", {}).get("notes", []) - - if not notes: - return {"warning": "No notes found in clip"} - - for note in notes: - pitch = note.get("pitch", 60) - start = note.get("start", 0) - duration = note.get("duration", 1) - - pitch_class = pitch % 12 - current_chord = get_chord_at_time(start) - - in_am_scale = any((pitch % 12) == (p % 12) for p in AM_SCALE) - - if in_am_scale: - notes_in_key += 1 - else: - notes_out_of_key += 1 - issues.append({ - "type": "out_of_key", - "pitch": pitch, - "pitch_name": pitch_to_name(pitch), - "start": start, - "expected": "Am scale (A, B, C, D, E, F, 
G)" - }) - - chord_tones_normalized = [t % 12 for t in CHORD_TONES[current_chord]] - if pitch_class in chord_tones_normalized: - chord_matches += 1 - else: - chord_mismatches += 1 - chord_tone_names = CHORD_NAMES[current_chord] - issues.append({ - "type": "chord_tone_mismatch", - "pitch": pitch, - "pitch_name": pitch_to_name(pitch), - "start": start, - "chord": current_chord, - "expected_chord_tones": chord_tone_names - }) - - return { - "total_notes": len(notes), - "notes_in_key": notes_in_key, - "notes_out_of_key": notes_out_of_key, - "chord_matches": chord_matches, - "chord_mismatches": chord_mismatches, - "issues": issues - } - - except Exception as e: - return {"error": str(e)} - -def analyze_bass_notes(client, track_index, scene_index=0): - issues = [] - correct_roots = 0 - incorrect_roots = 0 - - try: - response = client.send("get_notes", { - "track_index": track_index, - "scene_index": scene_index - }) - - if response.get("status") != "success": - return {"error": response.get("message", "Unknown error")} - - notes = response.get("result", {}).get("notes", []) - - if not notes: - return {"warning": "No bass notes found"} - - ROOT_NOTES = { - "Am": 57, - "F": 53, - "C": 48, - "G": 43 - } - - for note in notes: - pitch = note.get("pitch", 60) - start = note.get("start", 0) - - current_chord = get_chord_at_time(start) - expected_root = ROOT_NOTES[current_chord] - expected_root_class = expected_root % 12 - pitch_class = pitch % 12 - - if pitch_class == expected_root_class: - correct_roots += 1 - else: - incorrect_roots += 1 - if start % 4.0 < 0.5: - issues.append({ - "type": "wrong_bass_root", - "pitch": pitch, - "pitch_name": pitch_to_name(pitch), - "start": start, - "chord": current_chord, - "expected_root": pitch_to_name(expected_root) - }) - - return { - "total_notes": len(notes), - "correct_roots": correct_roots, - "incorrect_roots": incorrect_roots, - "issues": issues - } - - except Exception as e: - return {"error": str(e)} - -def main(): - 
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True) - - log_message("=" * 60) - log_message("AGENT 11 - HARMONIC COHERENCE REVIEW") - log_message("=" * 60) - log_message(f"Target progression: Am - F - C - G (8 beats each)") - log_message(f"Am scale: A, B, C, D, E, F, G") - log_message("") - - client = AbletonSocketClient() - - session = client.send("get_session_info") - if session.get("status") != "success": - log_message("ERROR: Cannot connect to Ableton session") - return - - log_message(f"Session: {session.get('result', {}).get('num_tracks', 0)} tracks, " - f"tempo: {session.get('result', {}).get('tempo', 120)} BPM") - - tracks_response = client.send("get_tracks") - if tracks_response.get("status") != "success": - log_message("ERROR: Cannot get tracks") - return - - tracks = tracks_response.get("result", []) - - midi_tracks = [ - (i, t.get("name", "Unknown"), t.get("session_clip_count", 0)) - for i, t in enumerate(tracks) - if t.get("has_midi_input") and t.get("session_clip_count", 0) > 0 - ] - - log_message(f"Found {len(midi_tracks)} MIDI tracks with clips") - log_message("") - - total_issues = 0 - critical_issues = 0 - - for track_index, track_name, clip_count in midi_tracks: - log_message(f"\n--- TRACK {track_index}: {track_name} ---") - - if "BASS" in track_name.upper(): - log_message("Analyzing as BASS track (checking root notes)") - result = analyze_bass_notes(client, track_index) - else: - log_message("Analyzing harmonic content") - result = analyze_track_harmony(client, track_index, track_name) - - if "error" in result: - log_message(f" ERROR: {result['error']}") - continue - - if "warning" in result: - log_message(f" WARNING: {result['warning']}") - continue - - if "total_notes" in result: - log_message(f" Total notes: {result['total_notes']}") - - if "notes_in_key" in result: - log_message(f" Notes in Am scale: {result['notes_in_key']}/{result['total_notes']}") - if result["notes_out_of_key"] > 0: - log_message(f" OUT OF KEY: 
{result['notes_out_of_key']} notes") - total_issues += result["notes_out_of_key"] - - if "chord_matches" in result: - log_message(f" Chord tone matches: {result['chord_matches']}/{result['total_notes']}") - if result["chord_mismatches"] > 0: - log_message(f" CHORD MISMATCHES: {result['chord_mismatches']} notes") - - if "correct_roots" in result: - log_message(f" Correct bass roots: {result['correct_roots']}/{result['total_notes']}") - if result["incorrect_roots"] > 0: - log_message(f" WRONG BASS ROOTS: {result['incorrect_roots']} notes") - total_issues += result["incorrect_roots"] - critical_issues += result["incorrect_roots"] - - if result.get("issues"): - for issue in result["issues"][:5]: - if issue["type"] == "out_of_key": - log_message(f" [ISSUE] Note {issue['pitch_name']}{issue['pitch']} at beat {issue['start']:.1f} " - f"not in Am scale") - elif issue["type"] == "chord_tone_mismatch": - log_message(f" [ISSUE] Note {issue['pitch_name']}{issue['pitch']} at beat {issue['start']:.1f} " - f"not in chord {issue['chord']} (expected: {issue['expected_chord_tones']})") - elif issue["type"] == "wrong_bass_root": - log_message(f" [CRITICAL] Bass note {issue['pitch_name']}{issue['pitch']} at beat {issue['start']:.1f} " - f"should be {issue['expected_root']} for chord {issue['chord']}") - - log_message("\n" + "=" * 60) - log_message("HARMONIC COHERENCE SUMMARY") - log_message("=" * 60) - - if critical_issues > 0: - log_message(f"STATUS: CRITICAL ISSUES FOUND") - log_message(f" - {critical_issues} critical bass root mismatches") - log_message(f" - {total_issues} total harmonic issues") - log_message("") - log_message("RECOMMENDATION: Review bass notes and chord tones") - elif total_issues > 0: - log_message(f"STATUS: MINOR ISSUES FOUND") - log_message(f" - {total_issues} notes out of Am scale") - log_message("") - log_message("RECOMMENDATION: May be intentional chromatic passing tones") - else: - log_message(f"STATUS: HARMONICALLY COHERENT") - log_message(f" - All notes 
in Am scale") - log_message(f" - Bass follows root progression A-F-C-G") - log_message(f" - Chord tones align with progression") - - log_message("") - log_message("Agent 11 review complete.") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent17_sample_loader.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent17_sample_loader.py deleted file mode 100644 index ad8fec6..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent17_sample_loader.py +++ /dev/null @@ -1,192 +0,0 @@ -""" -Agent 17 - Sample Loading Reviewer -Verifies audio tracks have samples loaded and loads samples if needed. -""" -import socket -import json -import os -import glob -from datetime import datetime - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent17_review_samples.txt" -SAMPLE_LIBRARY = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks" -ORGANIZED_LIBRARY = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples" -HOST = "127.0.0.1" -PORT = 9877 - -def log(message): - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - log_line = f"[{timestamp}] {message}" - print(log_line) - os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True) - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(log_line + "\n") - -def send_command(command_type, params=None): - if params is None: - params = {} - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(30) - try: - sock.connect((HOST, PORT)) - request = {"type": command_type, "params": params} - sock.sendall((json.dumps(request) + "\n").encode("utf-8")) - response = b"" - while True: - chunk = sock.recv(4096) - if not chunk: - break - response += chunk - if b"\n" in response: - break - return json.loads(response.decode("utf-8").strip()) - finally: - sock.close() - -def find_samples(query, sample_type=None): - samples = [] - search_paths = 
[ORGANIZED_LIBRARY, SAMPLE_LIBRARY] - - for search_path in search_paths: - if not os.path.exists(search_path): - continue - - pattern = f"**/*{query}*.wav" - for filepath in glob.glob(os.path.join(search_path, pattern), recursive=True): - if sample_type: - type_dir = os.path.join(search_path, sample_type) - if type_dir.lower() in filepath.lower(): - samples.append(filepath) - else: - samples.append(filepath) - - return samples[:15] - -def load_samples_to_track(track_index, track_name, sample_type, positions): - samples = find_samples(sample_type) - if not samples: - log(f" No samples found for type: {sample_type}") - return 0 - - clips_loaded = 0 - for i, sample_path in enumerate(samples): - if clips_loaded >= 10: - break - - position = positions[i] if i < len(positions) else positions[-1] + (i - len(positions) + 1) * 4 - - try: - result = send_command("create_arrangement_audio_pattern", { - "track_index": track_index, - "file_path": sample_path, - "positions": [position], - "name": f"{track_name} Clip {i+1}" - }) - if result.get("status") == "success": - clips_loaded += 1 - log(f" Loaded: {os.path.basename(sample_path)} at position {position}") - else: - log(f" Failed: {result.get('message', 'Unknown error')}") - except Exception as e: - log(f" Error loading sample: {e}") - - return clips_loaded - -def main(): - log("=" * 60) - log("Agent 17 - Sample Loading Reviewer Started") - log("=" * 60) - - log("\n[1] Connecting to Ableton socket...") - try: - session = send_command("get_session_info", {}) - if session.get("status") != "success": - log(f"ERROR: Failed to get session info: {session}") - return - log(f"Connected. 
Tempo: {session.get('result', {}).get('tempo', 'unknown')} BPM") - except Exception as e: - log(f"ERROR: Cannot connect to Ableton: {e}") - return - - log("\n[2] Getting track list...") - try: - tracks_response = send_command("get_tracks", {}) - if tracks_response.get("status") != "success": - log(f"ERROR: Failed to get tracks: {tracks_response}") - return - tracks = tracks_response.get("result", []) - log(f"Found {len(tracks)} tracks") - except Exception as e: - log(f"ERROR: Cannot get tracks: {e}") - return - - log("\n[3] Analyzing audio tracks...") - audio_tracks_needing_samples = [] - - for track in tracks: - track_name = track.get("name", "") - track_index = track.get("index", -1) - has_audio = track.get("has_audio_input", False) and track.get("has_audio_output", False) - has_midi = track.get("has_midi_input", False) - arr_clips = track.get("arrangement_clip_count", 0) - - if has_audio and not has_midi: - if arr_clips < 10: - audio_tracks_needing_samples.append({ - "index": track_index, - "name": track_name, - "clips": arr_clips - }) - log(f" Track {track_index}: '{track_name}' - {arr_clips} clips (NEEDS SAMPLES)") - else: - log(f" Track {track_index}: '{track_name}' - {arr_clips} clips (OK)") - - if not audio_tracks_needing_samples: - log("\n[4] All audio tracks have sufficient samples!") - return - - log(f"\n[4] {len(audio_tracks_needing_samples)} tracks need samples. 
Loading...") - - track_type_map = { - "KICK": "kick", - "SNARE": "snare", - "HATS": "hat", - "HAT": "hat", - "BASS": "bass", - "LEAD": "synth", - "PAD": "atmos", - "ARP": "synth", - "PERC": "percussion", - "VOCAL": "vocal", - "RISER": "riser", - "CRASH": "crash", - "DOWNLIFTER": "fx", - "AUDIO": "synth" - } - - positions = [0, 8, 16, 24, 32, 40, 48, 56, 64, 72] - - for track_info in audio_tracks_needing_samples: - track_index = track_info["index"] - track_name = track_info["name"] - - sample_type = "synth" - for key, stype in track_type_map.items(): - if key in track_name.upper(): - sample_type = stype - break - - log(f"\n Loading {sample_type} samples into track {track_index} ('{track_name}')...") - clips_loaded = load_samples_to_track(track_index, track_name, sample_type, positions) - track_info["loaded"] = clips_loaded - - log("\n" + "=" * 60) - log("SUMMARY") - log("=" * 60) - for track_info in audio_tracks_needing_samples: - log(f" Track {track_info['index']} ('{track_info['name']}'): {track_info.get('clips', 0)} -> +{track_info.get('loaded', 0)} clips loaded") - - log("\nAgent 17 completed.") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent7_vocals.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent7_vocals.py deleted file mode 100644 index 5d1e4fe..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent7_vocals.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python3 -""" -Agent 7 - VOCAL/CHOIR SPECIALIST -Loads vocal samples at specific arrangement positions -""" -import socket -import json -import sys - -HOST = "127.0.0.1" -PORT = 9877 - -VOCAL_MAIN_TRACK = 12 -VOCAL_TEXTURE_TRACK = 13 - -VOCAL_MAIN_SAMPLES = [ - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\BBH- Primer Impacto - Vocal Quema D#m 126 Bpm.wav", - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote 
Scripts\librerias\organized_samples\oneshots\vocal\BBH - Primer Impacto - Vocal Importante 1.wav", - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\vocal\BBH - Primer Impacto - Vocal Importante 2.wav", - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\vocal\BBH - Primer Impacto - Vocal Importante 3.wav", -] - -VOCAL_TEXTURE_SAMPLES = [ - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_03_Am_125.wav", - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_05_Cm_125.wav", - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_08_Cm_125.wav", - r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\vocal\Vox_10_Bm_125.wav", -] - -VOCAL_MAIN_POSITIONS = [16.0, 48.0, 80.0, 112.0] -VOCAL_TEXTURE_POSITIONS = [0.0, 32.0, 64.0, 96.0] - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent7_vocals.txt" - -def send_command(command_type: str, params: dict = None, timeout: float = 45.0) -> dict: - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((HOST, PORT), timeout=timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - line = reader.readline() - if not line: - raise RuntimeError(f"No response for command: {command_type}") - return json.loads(line) - -def log(msg: str): - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(msg + "\n") - print(msg) - -def main(): - log("=" * 60) - log("AGENT 7 - VOCAL/CHOIR SPECIALIST") - log("=" * 60) - - # Step 1: Set input routing to "No Input" for both tracks - log("\n[STEP 1] Setting input routing to 'No Input'...") - - for track_idx, track_name in 
[(VOCAL_MAIN_TRACK, "VOCAL MAIN"), (VOCAL_TEXTURE_TRACK, "VOCAL TEXTURE")]: - try: - result = send_command("set_track_input_routing", {"index": track_idx, "routing_name": "No Input"}) - log(f" Track {track_idx} ({track_name}): {result}") - except Exception as e: - log(f" ERROR Track {track_idx}: {e}") - - # Step 2: Load VOCAL MAIN samples at key moments - log("\n[STEP 2] Loading VOCAL MAIN samples at key moments...") - - for i, (sample_path, position) in enumerate(zip(VOCAL_MAIN_SAMPLES, VOCAL_MAIN_POSITIONS)): - try: - result = send_command("create_arrangement_audio_pattern", { - "track_index": VOCAL_MAIN_TRACK, - "file_path": sample_path, - "positions": [position], - "name": f"Vocal Main {i+1}" - }) - log(f" Position {position}: {sample_path.split(chr(92))[-1]} -> {result.get('status', 'unknown')}") - except Exception as e: - log(f" ERROR at position {position}: {e}") - - # Step 3: Load VOCAL TEXTURE samples at atmospheric positions - log("\n[STEP 3] Loading VOCAL TEXTURE samples at atmospheric positions...") - - for i, (sample_path, position) in enumerate(zip(VOCAL_TEXTURE_SAMPLES, VOCAL_TEXTURE_POSITIONS)): - try: - result = send_command("create_arrangement_audio_pattern", { - "track_index": VOCAL_TEXTURE_TRACK, - "file_path": sample_path, - "positions": [position], - "name": f"Vocal Texture {i+1}" - }) - log(f" Position {position}: {sample_path.split(chr(92))[-1]} -> {result.get('status', 'unknown')}") - except Exception as e: - log(f" ERROR at position {position}: {e}") - - log("\n" + "=" * 60) - log("AGENT 7 COMPLETE - Vocal layers loaded") - log("=" * 60) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent8_fx_transitions.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent8_fx_transitions.py deleted file mode 100644 index b5c7561..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent8_fx_transitions.py +++ /dev/null @@ -1,102 +0,0 @@ -import json -import socket 
-from datetime import datetime - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent8_fx.txt" - -def log(msg): - timestamp = datetime.now().isoformat() - entry = f"[{timestamp}] {msg}" - print(entry) - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(entry + "\n") - -class AbletonSocketClient: - def __init__(self, host="127.0.0.1", port=9877, timeout=30.0): - self.host = host - self.port = port - self.timeout = timeout - - def send(self, command_type, params=None): - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - try: - line = reader.readline() - finally: - reader.close() - try: - sock.shutdown(socket.SHUT_RDWR) - except OSError: - pass - - if not line: - raise RuntimeError(f"No response for command: {command_type}") - return json.loads(line) - -def main(): - log("=" * 60) - log("AGENT 8 - FX TRANSITION SPECIALIST") - log("=" * 60) - - client = AbletonSocketClient() - - RISER_TRACK = 16 - DOWNLIFTER_TRACK = 17 - CRASH_TRACK = 18 - - RISER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\fx\BBH - Primer Impacto -Risers 2.wav" - DOWNLIFTER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\fx\EFX_01_Em_125.wav" - CRASH_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\loops\fx\BBH - Primer Impacto - Crash 1.wav" - - RISER_POSITIONS = [14, 46, 78, 110, 142, 174] - DOWNLIFTER_POSITIONS = [16, 48, 80, 112, 144, 176] - CRASH_POSITIONS = [0, 32, 64, 96, 128, 160, 192] - - log(f"Track indices: RISER={RISER_TRACK}, DOWNLIFTER={DOWNLIFTER_TRACK}, CRASH={CRASH_TRACK}") - log(f"Riser positions: {RISER_POSITIONS}") - log(f"Downlifter positions: {DOWNLIFTER_POSITIONS}") - 
log(f"Crash positions: {CRASH_POSITIONS}") - - log("") - log("Step 1: Placing RISER samples...") - result = client.send("create_arrangement_audio_pattern", { - "track_index": RISER_TRACK, - "file_path": RISER_PATH, - "positions": RISER_POSITIONS, - "name": "RISER FX" - }) - log(f"RISER result: {json.dumps(result, indent=2)}") - - log("") - log("Step 2: Placing DOWNLIFTER samples (using EFX fallback)...") - result = client.send("create_arrangement_audio_pattern", { - "track_index": DOWNLIFTER_TRACK, - "file_path": DOWNLIFTER_PATH, - "positions": DOWNLIFTER_POSITIONS, - "name": "DOWNLIFTER FX" - }) - log(f"DOWNLIFTER result: {json.dumps(result, indent=2)}") - - log("") - log("Step 3: Placing CRASH samples...") - result = client.send("create_arrangement_audio_pattern", { - "track_index": CRASH_TRACK, - "file_path": CRASH_PATH, - "positions": CRASH_POSITIONS, - "name": "CRASH FX" - }) - log(f"CRASH result: {json.dumps(result, indent=2)}") - - log("") - log("=" * 60) - log("AGENT 8 COMPLETE") - log("=" * 60) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent9_perc_loader.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent9_perc_loader.py deleted file mode 100644 index bbbfce8..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/agent9_perc_loader.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -Agent 9 - PERCUSSION SPECIALIST -Loads percussion samples into AUDIO PERC MAIN and AUDIO PERC FX tracks. 
-""" -import json -import socket -import os -from datetime import datetime -from typing import Any, Dict, List - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\agent9_perc.txt" -HOST = "127.0.0.1" -PORT = 9877 -TIMEOUT = 30.0 - -PERC_MAIN_TRACK_INDEX = 14 -PERC_FX_TRACK_INDEX = 15 - -PERC_MAIN_POSITIONS = [0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176] -PERC_FX_POSITIONS = [4, 12, 20, 28, 36, 44, 52, 60] - -SAMPLE_BASE = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples" - -PERC_LOOP_SAMPLES = [ - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_01_Fm_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_02_Any_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_03_A#_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_04_Any_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_05_Any_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_06_Dm_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_07_Cm_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_08_Fm_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_09_Bm_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_10_Dm_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_11_Am_125.wav"), - os.path.join(SAMPLE_BASE, "loops", "perc", "Perc_Loop_12_Bm_125.wav"), -] - -PERC_FX_SAMPLES = [ - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 2.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 3.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 1.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 2.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Bongos y Congas 3.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - 
Bongos y Congas 4.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 6.wav"), - os.path.join(SAMPLE_BASE, "oneshots", "perc", "BBH - Primer Impacto - Shaker 8.wav"), -] - - -def log(msg: str): - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - line = f"[{timestamp}] {msg}" - print(line) - try: - os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True) - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(line + "\n") - except Exception as e: - print(f"Log write error: {e}") - - -class AbletonSocketClient: - def __init__(self, host: str = HOST, port: int = PORT, timeout: float = TIMEOUT): - self.host = host - self.port = port - self.timeout = timeout - - def send(self, command_type: str, params: Dict[str, Any] = None) -> Dict[str, Any]: - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - try: - line = reader.readline() - finally: - reader.close() - try: - sock.shutdown(socket.SHUT_RDWR) - except OSError: - pass - - if not line: - raise RuntimeError(f"No response for command: {command_type}") - - return json.loads(line) - - -def set_input_routing(client: AbletonSocketClient, track_index: int, routing_name: str) -> bool: - try: - response = client.send("set_track_input_routing", { - "track_index": track_index, - "routing_name": routing_name, - }) - if response.get("status") == "success": - log(f"Set track {track_index} input routing to '{routing_name}'") - return True - else: - log(f"Failed to set input routing: {response.get('message', 'unknown error')}") - return False - except Exception as e: - log(f"Error setting input routing: {e}") - return False - - -def load_audio_pattern(client: AbletonSocketClient, track_index: int, file_path: str, positions: List[float], name: str = "") -> bool: - if not 
os.path.exists(file_path): - log(f"Sample not found: {file_path}") - return False - - try: - response = client.send("create_arrangement_audio_pattern", { - "track_index": track_index, - "file_path": file_path, - "positions": positions, - "name": name or os.path.basename(file_path), - }) - if response.get("status") == "success": - log(f"Loaded '{os.path.basename(file_path)}' at positions {positions[:3]}... on track {track_index}") - return True - else: - log(f"Failed to load audio: {response.get('message', 'unknown error')}") - return False - except Exception as e: - log(f"Error loading audio: {e}") - return False - - -def main(): - log("=" * 60) - log("AGENT 9 - PERCUSSION SPECIALIST STARTING") - log("=" * 60) - - client = AbletonSocketClient() - - log("Connecting to Ableton socket...") - try: - info = client.send("get_session_info", {}) - if info.get("status") != "success": - log("Failed to get session info") - return - log(f"Connected. BPM: {info.get('result', {}).get('tempo', 'unknown')}") - except Exception as e: - log(f"Connection failed: {e}") - return - - log("Setting input routing to 'No Input'...") - set_input_routing(client, PERC_MAIN_TRACK_INDEX, "No Input") - set_input_routing(client, PERC_FX_TRACK_INDEX, "No Input") - - log("") - log("Loading PERC MAIN loops...") - main_loaded = 0 - for i, pos in enumerate(PERC_MAIN_POSITIONS): - if i < len(PERC_LOOP_SAMPLES): - sample = PERC_LOOP_SAMPLES[i] - if load_audio_pattern(client, PERC_MAIN_TRACK_INDEX, sample, [float(pos)], f"PERC_LOOP_{i+1}"): - main_loaded += 1 - - log(f"PERC MAIN: {main_loaded}/{len(PERC_MAIN_POSITIONS)} samples loaded") - - log("") - log("Loading PERC FX hits...") - fx_loaded = 0 - for i, pos in enumerate(PERC_FX_POSITIONS): - if i < len(PERC_FX_SAMPLES): - sample = PERC_FX_SAMPLES[i] - if load_audio_pattern(client, PERC_FX_TRACK_INDEX, sample, [float(pos)], f"PERC_FX_{i+1}"): - fx_loaded += 1 - - log(f"PERC FX: {fx_loaded}/{len(PERC_FX_POSITIONS)} samples loaded") - - log("") - log("=" * 
60) - log(f"AGENT 9 COMPLETE: MAIN={main_loaded}, FX={fx_loaded}") - log("=" * 60) - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_analyzer.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_analyzer.py deleted file mode 100644 index 29feefa..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_analyzer.py +++ /dev/null @@ -1,681 +0,0 @@ -""" -audio_analyzer.py - Análisis de audio para detección de Key y BPM - -Proporciona análisis básico de archivos de audio para extraer: -- BPM (tempo) mediante detección de onset y autocorrelación -- Key (tonalidad) mediante análisis de cromagrama -- Características espectrales para clasificación -""" - -import os -import logging -import numpy as np -import subprocess -from pathlib import Path -from typing import Dict, Any, Optional, Tuple, List -from dataclasses import dataclass -from enum import Enum - -logger = logging.getLogger("AudioAnalyzer") - -# Constantes musicales -NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'] -KEY_PROFILES = { - # Perfiles de Krumhansl-Schmuckler para detección de tonalidad - 'major': [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88], - 'minor': [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17] -} - -CIRCLE_OF_FIFTHS_MAJOR = ['C', 'G', 'D', 'A', 'E', 'B', 'F#', 'C#', 'G#', 'D#', 'A#', 'F'] -CIRCLE_OF_FIFTHS_MINOR = ['Am', 'Em', 'Bm', 'F#m', 'C#m', 'G#m', 'D#m', 'A#m', 'Fm', 'Cm', 'Gm', 'Dm'] - - -class SampleType(Enum): - """Tipos de samples musicales""" - KICK = "kick" - SNARE = "snare" - CLAP = "clap" - HAT_CLOSED = "hat_closed" - HAT_OPEN = "hat_open" - HAT = "hat" - PERC = "perc" - SHAKER = "shaker" - TOM = "tom" - CRASH = "crash" - RIDE = "ride" - BASS = "bass" - SYNTH = "synth" - PAD = "pad" - LEAD = "lead" - PLUCK = "pluck" - ARP = "arp" - CHORD = "chord" - STAB = "stab" - VOCAL = "vocal" - FX = "fx" - LOOP = "loop" - 
AMBIENCE = "ambience" - UNKNOWN = "unknown" - - -@dataclass -class AudioFeatures: - """Características extraídas de un archivo de audio""" - bpm: Optional[float] - key: Optional[str] - key_confidence: float - duration: float - sample_rate: int - sample_type: SampleType - spectral_centroid: float - spectral_rolloff: float - zero_crossing_rate: float - rms_energy: float - is_harmonic: bool - is_percussive: bool - suggested_genres: List[str] - - -class AudioAnalyzer: - """ - Analizador de audio para samples musicales. - - Soporta múltiples backends: - - librosa (recomendado, más preciso) - - basic (fallback sin dependencias externas, basado en nombre de archivo) - """ - - def __init__(self, backend: str = "auto"): - """ - Inicializa el analizador de audio. - - Args: - backend: 'librosa', 'basic', o 'auto' (detecta automáticamente) - """ - self.backend = backend - self._librosa_available = False - self._soundfile_available = False - - if backend in ("auto", "librosa"): - self._check_librosa() - - if self._librosa_available: - logger.info("Usando backend: librosa") - else: - logger.info("Usando backend: basic (análisis por nombre de archivo)") - - def _check_librosa(self): - """Verifica si librosa está disponible""" - try: - import librosa - import soundfile as sf - self._librosa_available = True - self._soundfile_available = True - self.librosa = librosa - self.sf = sf - except ImportError: - self._librosa_available = False - self._soundfile_available = False - - def analyze(self, file_path: str) -> AudioFeatures: - """ - Analiza un archivo de audio y extrae características. 
- - Args: - file_path: Ruta al archivo de audio - - Returns: - AudioFeatures con los datos extraídos - """ - path = Path(file_path) - - if not path.exists(): - raise FileNotFoundError(f"Archivo no encontrado: {file_path}") - - # Intentar análisis con librosa si está disponible - if self._librosa_available: - try: - return self._analyze_with_librosa(file_path) - except Exception as e: - logger.warning(f"Error con librosa: {e}, usando análisis básico") - - # Fallback a análisis básico - return self._analyze_basic(file_path) - - def _analyze_with_librosa(self, file_path: str) -> AudioFeatures: - """Análisis completo usando librosa""" - # Cargar audio - y, sr = self.librosa.load(file_path, sr=None, mono=True) - - # Duración - duration = self.librosa.get_duration(y=y, sr=sr) - - # Detectar BPM - tempo, _ = self.librosa.beat.beat_track(y=y, sr=sr) - bpm = float(tempo) if isinstance(tempo, (int, float, np.number)) else None - - # Análisis espectral - spectral_centroids = self.librosa.feature.spectral_centroid(y=y, sr=sr)[0] - spectral_rolloffs = self.librosa.feature.spectral_rolloff(y=y, sr=sr)[0] - zcr = self.librosa.feature.zero_crossing_rate(y)[0] - rms = self.librosa.feature.rms(y=y)[0] - - # Detectar key - key, key_confidence = self._detect_key_librosa(y, sr) - - # Clasificación percusivo vs armónico - is_percussive = self._is_percussive(y, sr) - is_harmonic = not is_percussive and duration > 1.0 - - # Determinar tipo de sample - sample_type = self._classify_sample_type( - file_path, is_percussive, is_harmonic, duration, - float(np.mean(spectral_centroids)), float(np.mean(rms)) - ) - - # Sugerir géneros - suggested_genres = self._suggest_genres(sample_type, bpm, key) - - return AudioFeatures( - bpm=bpm, - key=key, - key_confidence=key_confidence, - duration=duration, - sample_rate=sr, - sample_type=sample_type, - spectral_centroid=float(np.mean(spectral_centroids)), - spectral_rolloff=float(np.mean(spectral_rolloffs)), - zero_crossing_rate=float(np.mean(zcr)), - 
rms_energy=float(np.mean(rms)), - is_harmonic=is_harmonic, - is_percussive=is_percussive, - suggested_genres=suggested_genres - ) - - def _detect_key_librosa(self, y: np.ndarray, sr: int) -> Tuple[Optional[str], float]: - """ - Detecta la tonalidad usando cromagrama y correlación con perfiles. - """ - try: - # Calcular cromagrama - chroma = self.librosa.feature.chroma_stft(y=y, sr=sr) - chroma_avg = np.mean(chroma, axis=1) - - # Normalizar - chroma_avg = chroma_avg / (np.sum(chroma_avg) + 1e-10) - - best_key = None - best_score = -np.inf - best_mode = None - - # Probar todas las tonalidades mayores y menores - for mode, profile in KEY_PROFILES.items(): - for i in range(12): - # Rotar el perfil - rotated_profile = np.roll(profile, i) - # Correlación - score = np.corrcoef(chroma_avg, rotated_profile)[0, 1] - - if score > best_score: - best_score = score - best_mode = mode - best_key = NOTE_NAMES[i] - - # Formatear resultado - if best_key: - if best_mode == 'minor': - best_key = best_key + 'm' - confidence = max(0.0, min(1.0, (best_score + 1) / 2)) - return best_key, confidence - - except Exception as e: - logger.warning(f"Error detectando key: {e}") - - return None, 0.0 - - def _is_percussive(self, y: np.ndarray, sr: int) -> bool: - """ - Determina si un sonido es principalmente percusivo. 
- """ - try: - # Separar componentes armónicos y percusivos - y_harmonic, y_percussive = self.librosa.effects.hpss(y) - - # Calcular energía relativa - energy_harmonic = np.sum(y_harmonic ** 2) - energy_percussive = np.sum(y_percussive ** 2) - total_energy = energy_harmonic + energy_percussive - - if total_energy > 0: - percussive_ratio = energy_percussive / total_energy - return percussive_ratio > 0.6 - - except Exception as e: - logger.warning(f"Error en separación HPSS: {e}") - - # Fallback: usar duración como heurística - duration = len(y) / sr - return duration < 0.5 - - def _analyze_basic(self, file_path: str) -> AudioFeatures: - """ - Análisis básico sin dependencias externas. - Usa metadatos del archivo y nombre para inferir características. - """ - path = Path(file_path) - name = path.stem - - # Extraer del nombre - bpm = self._extract_bpm_from_name(name) - key = self._extract_key_from_name(name) - - # Estimar duración del archivo - duration = self._estimate_duration(file_path) - - # Clasificar por nombre - sample_type = self._classify_by_name(name) - - # Determinar características por tipo - is_percussive = sample_type in [ - SampleType.KICK, SampleType.SNARE, SampleType.CLAP, - SampleType.HAT, SampleType.HAT_CLOSED, SampleType.HAT_OPEN, - SampleType.PERC, SampleType.SHAKER, SampleType.TOM, - SampleType.CRASH, SampleType.RIDE - ] - is_harmonic = sample_type in [ - SampleType.BASS, SampleType.SYNTH, SampleType.PAD, - SampleType.LEAD, SampleType.PLUCK, SampleType.CHORD, - SampleType.VOCAL - ] - - # Valores por defecto basados en tipo - spectral_centroid = 5000.0 if is_percussive else 1000.0 - rms_energy = 0.5 - - suggested_genres = self._suggest_genres(sample_type, bpm, key) - - return AudioFeatures( - bpm=bpm, - key=key, - key_confidence=0.7 if key else 0.0, - duration=duration, - sample_rate=44100, - sample_type=sample_type, - spectral_centroid=spectral_centroid, - spectral_rolloff=spectral_centroid * 2, - zero_crossing_rate=0.1 if is_harmonic else 0.3, - 
rms_energy=rms_energy, - is_harmonic=is_harmonic, - is_percussive=is_percussive, - suggested_genres=suggested_genres - ) - - def _estimate_duration(self, file_path: str) -> float: - """Estima la duración del archivo de audio""" - try: - import wave - - ext = Path(file_path).suffix.lower() - - if ext == '.wav': - with wave.open(file_path, 'rb') as wav: - frames = wav.getnframes() - rate = wav.getframerate() - return frames / float(rate) - - elif ext in ('.mp3', '.ogg', '.flac', '.aif', '.aiff', '.m4a'): - windows_duration = self._estimate_duration_with_windows_shell(file_path) - if windows_duration > 0: - return windows_duration - # Estimación por tamaño de archivo - size = os.path.getsize(file_path) - # Aproximación: ~176KB por segundo para CD quality stereo - return size / (176.4 * 1024) - - except Exception as e: - logger.warning(f"Error estimando duración: {e}") - - return 0.0 - - def _estimate_duration_with_windows_shell(self, file_path: str) -> float: - """Obtiene la duración usando metadatos del shell de Windows cuando están disponibles.""" - if os.name != 'nt': - return 0.0 - - safe_path = file_path.replace("'", "''") - powershell_command = ( - f"$path = '{safe_path}'; " - "$shell = New-Object -ComObject Shell.Application; " - "$folder = $shell.Namespace((Split-Path $path)); " - "$file = $folder.ParseName((Split-Path $path -Leaf)); " - "$duration = $folder.GetDetailsOf($file, 27); " - "Write-Output $duration" - ) - try: - result = subprocess.run( - f'powershell -NoProfile -Command "{powershell_command}"', - capture_output=True, - text=True, - timeout=5, - check=False, - shell=True, - ) - value = (result.stdout or "").strip() - if not value: - return 0.0 - parts = value.split(':') - if len(parts) == 3: - return (int(parts[0]) * 3600) + (int(parts[1]) * 60) + float(parts[2]) - return 0.0 - except Exception: - return 0.0 - - def _extract_bpm_from_name(self, name: str) -> Optional[float]: - """Extrae BPM del nombre del archivo""" - import re - - patterns = [ - 
r'[_\s\-](\d{2,3})\s*BPM', - r'[_\s\-](\d{2,3})[_\s\-]', - r'(\d{2,3})bpm', - r'[_\s\-](\d{2,3})\s*(?:BPM|bpm)?\s*(?:\.wav|\.mp3|\.aif)', - ] - - for pattern in patterns: - match = re.search(pattern, name, re.IGNORECASE) - if match: - bpm = int(match.group(1)) - if 60 <= bpm <= 200: - return float(bpm) - - return None - - def _extract_key_from_name(self, name: str) -> Optional[str]: - """Extrae key del nombre del archivo""" - import re - - patterns = [ - r'[_\s\-]([A-G][#b]?(?:m|min|minor)?)[_\s\-]', - r'\bin\s+([A-G][#b]?(?:m|min|minor)?)\b', - r'Key\s+([A-G][#b]?(?:m|min|minor)?)', - r'[_\s\-]([A-G][#b]?)\s*(?:maj|major)?[_\s\-]', - ] - - for pattern in patterns: - match = re.search(pattern, name, re.IGNORECASE) - if match: - key = match.group(1) - # Normalizar - key = key.replace('b', '#').replace('Db', 'C#').replace('Eb', 'D#') - key = key.replace('Gb', 'F#').replace('Ab', 'G#').replace('Bb', 'A#') - - # Detectar si es menor - is_minor = 'm' in key.lower() or 'min' in key.lower() - key = key.replace('min', '').replace('minor', '').replace('major', '') - key = key.rstrip('mM') - - if is_minor: - key = key + 'm' - - return key - - return None - - def _classify_sample_type(self, file_path: str, is_percussive: bool, - is_harmonic: bool, duration: float, - spectral_centroid: float, rms: float) -> SampleType: - """Clasifica el tipo de sample basado en características""" - # Primero intentar por nombre - sample_type = self._classify_by_name(Path(file_path).stem) - if sample_type != SampleType.UNKNOWN: - return sample_type - - # Clasificación por características de audio - if is_percussive: - if duration < 0.1: - if spectral_centroid < 2000: - return SampleType.KICK - elif spectral_centroid > 8000: - return SampleType.HAT_CLOSED - else: - return SampleType.SNARE - elif duration < 0.3: - return SampleType.CLAP - else: - return SampleType.PERC - - elif is_harmonic: - if spectral_centroid < 500: - return SampleType.BASS - elif duration > 4.0: - return SampleType.PAD - 
else: - return SampleType.SYNTH - - return SampleType.UNKNOWN - - def _classify_by_name(self, name: str) -> SampleType: - """Clasifica el tipo de sample basado en su nombre""" - name_lower = name.lower() - - # Mapeo de palabras clave a tipos - keywords = { - SampleType.KICK: ['kick', 'bd', 'bass drum', 'kickdrum', 'kik'], - SampleType.SNARE: ['snare', 'snr', 'sd', 'rim'], - SampleType.CLAP: ['clap', 'clp', 'handclap'], - SampleType.HAT_CLOSED: ['closed hat', 'closedhat', 'chh', 'closed'], - SampleType.HAT_OPEN: ['open hat', 'openhat', 'ohh', 'open'], - SampleType.HAT: ['hat', 'hihat', 'hi-hat', 'hh'], - SampleType.PERC: ['perc', 'percussion', 'conga', 'bongo', 'timb'], - SampleType.SHAKER: ['shaker', 'shake', 'tamb'], - SampleType.TOM: ['tom', 'tomtom'], - SampleType.CRASH: ['crash', 'cymbal'], - SampleType.RIDE: ['ride'], - SampleType.BASS: ['bass', 'bassline', 'sub', '808', 'reese'], - SampleType.SYNTH: ['synth', 'lead', 'arp', 'sequence'], - SampleType.PAD: ['pad', 'atmosphere', 'dron'], - SampleType.PLUCK: ['pluck'], - SampleType.CHORD: ['chord', 'stab'], - SampleType.VOCAL: ['vocal', 'vox', 'voice', 'speech', 'talk'], - SampleType.FX: ['fx', 'effect', 'sweep', 'riser', 'downlifter', 'impact', 'hit', 'noise'], - SampleType.LOOP: ['loop', 'full', 'groove'], - } - - for sample_type, words in keywords.items(): - for word in words: - if word in name_lower: - return sample_type - - return SampleType.UNKNOWN - - def _suggest_genres(self, sample_type: SampleType, bpm: Optional[float], - key: Optional[str]) -> List[str]: - """Sugiere géneros musicales apropiados para el sample""" - genres = [] - - if bpm: - if 118 <= bpm <= 128: - genres.extend(['house', 'tech-house', 'deep-house']) - elif 124 <= bpm <= 132: - genres.extend(['tech-house', 'techno']) - elif 132 <= bpm <= 142: - genres.extend(['techno', 'peak-time-techno']) - elif 142 <= bpm <= 150: - genres.extend(['trance', 'hard-techno']) - elif 160 <= bpm <= 180: - genres.extend(['drum-and-bass', 'neurofunk']) - elif 
bpm < 118: - genres.extend(['downtempo', 'ambient', 'lo-fi']) - - # Por tipo de sample - if sample_type in [SampleType.KICK, SampleType.SNARE, SampleType.CLAP]: - if not genres: - genres = ['techno', 'house'] - elif sample_type == SampleType.BASS: - if not genres: - genres = ['techno', 'house', 'bass-music'] - elif sample_type in [SampleType.SYNTH, SampleType.PAD]: - if not genres: - genres = ['trance', 'progressive', 'ambient'] - - return genres if genres else ['electronic'] - - def get_compatible_key(self, key: str, shift: int = 0) -> str: - """ - Obtiene una key compatible usando el círculo de quintas. - - Args: - key: Key original (ej: 'Am', 'F#m') - shift: Desplazamiento en el círculo (+1 = quinta arriba, -1 = quinta abajo) - - Returns: - Key resultante - """ - is_minor = key.endswith('m') - root = key.rstrip('m') - - if root not in NOTE_NAMES: - return key - - circle = CIRCLE_OF_FIFTHS_MINOR if is_minor else CIRCLE_OF_FIFTHS_MAJOR - - try: - idx = circle.index(key) - new_idx = (idx + shift) % 12 - return circle[new_idx] - except ValueError: - return key - - def calculate_key_compatibility(self, key1: str, key2: str) -> float: - """ - Calcula la compatibilidad entre dos keys (0-1). - - Usa el círculo de quintas: keys cercanas son más compatibles. 
- """ - if key1 == key2: - return 1.0 - - # Normalizar - def normalize(k): - is_minor = k.endswith('m') - root = k.rstrip('m') - # Convertir bemoles a sostenidos - root = root.replace('Db', 'C#').replace('Eb', 'D#') - root = root.replace('Gb', 'F#').replace('Ab', 'G#').replace('Bb', 'A#') - return root + ('m' if is_minor else '') - - k1 = normalize(key1) - k2 = normalize(key2) - - if k1 == k2: - return 1.0 - - # Verificar si son modos diferentes de la misma nota - if k1.rstrip('m') == k2.rstrip('m'): - return 0.8 # Mismo root, diferente modo - - # Usar círculo de quintas - is_minor1 = k1.endswith('m') - is_minor2 = k2.endswith('m') - - if is_minor1 != is_minor2: - return 0.3 # Diferente modo, baja compatibilidad - - circle = CIRCLE_OF_FIFTHS_MINOR if is_minor1 else CIRCLE_OF_FIFTHS_MAJOR - - try: - idx1 = circle.index(k1) - idx2 = circle.index(k2) - distance = min(abs(idx1 - idx2), 12 - abs(idx1 - idx2)) - - # Compatibilidad decrece con la distancia - compatibility = max(0.0, 1.0 - (distance * 0.2)) - return compatibility - - except ValueError: - return 0.0 - - -# Instancia global -_analyzer: Optional[AudioAnalyzer] = None - - -def get_analyzer() -> AudioAnalyzer: - """Obtiene la instancia global del analizador""" - global _analyzer - if _analyzer is None: - _analyzer = AudioAnalyzer() - return _analyzer - - -def analyze_sample(file_path: str) -> Dict[str, Any]: - """ - Función de conveniencia para analizar un sample. 
- - Returns: - Diccionario con las características del sample - """ - analyzer = get_analyzer() - features = analyzer.analyze(file_path) - - return { - 'bpm': features.bpm, - 'key': features.key, - 'key_confidence': features.key_confidence, - 'duration': features.duration, - 'sample_rate': features.sample_rate, - 'sample_type': features.sample_type.value, - 'spectral_centroid': features.spectral_centroid, - 'rms_energy': features.rms_energy, - 'is_harmonic': features.is_harmonic, - 'is_percussive': features.is_percussive, - 'suggested_genres': features.suggested_genres, - } - - -def quick_analyze(file_path: str) -> Dict[str, Any]: - """ - Análisis rápido basado solo en el nombre del archivo. - No requiere dependencias externas. - """ - analyzer = AudioAnalyzer(backend="basic") - features = analyzer.analyze(file_path) - - return { - 'bpm': features.bpm, - 'key': features.key, - 'sample_type': features.sample_type.value, - 'suggested_genres': features.suggested_genres, - } - - -# Testing -if __name__ == "__main__": - import sys - - logging.basicConfig(level=logging.INFO) - - if len(sys.argv) < 2: - print("Uso: python audio_analyzer.py ") - sys.exit(1) - - file_path = sys.argv[1] - - print(f"\nAnalizando: {file_path}") - print("=" * 50) - - try: - result = analyze_sample(file_path) - - print("\nResultados:") - print(f" BPM: {result['bpm'] or 'No detectado'}") - print(f" Key: {result['key'] or 'No detectado'} (confianza: {result['key_confidence']:.2f})") - print(f" Duración: {result['duration']:.2f}s") - print(f" Tipo: {result['sample_type']}") - print(f" Géneros sugeridos: {', '.join(result['suggested_genres'])}") - print(f" Es percusivo: {result['is_percussive']}") - print(f" Es armónico: {result['is_harmonic']}") - - except Exception as e: - print(f"Error: {e}") - sys.exit(1) diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_resampler.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_resampler.py deleted file mode 100644 index ec23c7f..0000000 
--- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/audio_resampler.py +++ /dev/null @@ -1,2466 +0,0 @@ -""" -audio_resampler.py - Deriva transiciones y FX propios desde los samples elegidos. - -Phase 1 Improvements: -- Cache robusto con invalidacion por mtime, size y edad maxima -- Crossfades equal-power para eliminar clicks -- HPF/LPF sweeps suaves con overlap-add y filtros butterworth de 4to orden -- Normalizacion con soft limiting mejorado (curva cubica + lookahead) -""" - -from __future__ import annotations - -import hashlib -import logging -import os -import time -from collections import OrderedDict -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple - -import numpy as np - -try: - import soundfile as sf -except ImportError: # pragma: no cover - sf = None - -try: - import librosa -except ImportError: # pragma: no cover - librosa = None - -try: - from scipy import signal as scipy_signal -except ImportError: # pragma: no cover - scipy_signal = None - - -logger = logging.getLogger("AudioResampler") - - -def _safe_float(value: Any, default: float = 0.0) -> float: - try: - return float(value) - except Exception: - return float(default) - - -def _section_offsets(sections: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], float, float]]: - offsets: List[Tuple[Dict[str, Any], float, float]] = [] - cursor = 0.0 - for section in sections: - beats = _safe_float(section.get("beats", 0.0), _safe_float(section.get("bars", 8), 8.0) * 4.0) - start = float(cursor) - end = float(cursor + max(1.0, beats)) - offsets.append((section, start, end)) - cursor = end - return offsets - - -def _samples_from_seconds(seconds: float, sample_rate: int, min_samples: int = 256) -> int: - """Convierte segundos a samples con minimo garantizado. 
- - Args: - seconds: Duracion en segundos - sample_rate: Tasa de muestreo en Hz - min_samples: Minimo de samples a retornar (default: 256) - - Returns: - Numero de samples con minimo garantizado - """ - return max(min_samples, int(round(seconds * sample_rate))) - - -def _seconds_from_samples(samples: int, sample_rate: int, min_duration: float = 0.05) -> float: - """Convierte samples a segundos. - - Args: - samples: Numero de samples - sample_rate: Tasa de muestreo en Hz - min_duration: Duracion minima en segundos si samples es 0 (default: 0.05) - - Returns: - Duracion en segundos - """ - return samples / sample_rate if samples > 0 else min_duration - - - -def _ensure_2d_float(audio: np.ndarray) -> np.ndarray: - """Asegura que el array sea 2D float32 (samples, channels).""" - if audio is None or audio.size == 0: - return np.zeros((1, 1), dtype=np.float32) - audio = np.asarray(audio, dtype=np.float32) - if audio.ndim == 1: - audio = audio.reshape(-1, 1) - return audio - - -def _safe_slice(audio: np.ndarray, start: int, end: int) -> np.ndarray: - """Extrae slice seguro que nunca retorna array vacio.""" - if audio is None or audio.size == 0: - channels = audio.shape[1] if (audio is not None and audio.ndim == 2) else 1 - return np.zeros((1, channels), dtype=np.float32) - start = max(0, min(start, audio.shape[0] - 1)) - end = max(start + 1, min(end, audio.shape[0])) - result = audio[start:end] - if result.size == 0: - return np.zeros((1, audio.shape[1]), dtype=np.float32) - return result - - -def _validate_mix_shapes(a: np.ndarray, b: np.ndarray) -> Tuple[bool, str]: - """Valida que dos arrays puedan mezclarse (broadcast compatible).""" - if a is None or b is None: - return False, "None array" - if a.size == 0 or b.size == 0: - return False, f"Empty array: a.shape={a.shape}, b.shape={b.shape}" - if a.ndim != b.ndim: - return False, f"Dimension mismatch: {a.ndim} vs {b.ndim}" - if a.shape[1] != b.shape[1]: - return False, f"Channel mismatch: {a.shape[1]} vs {b.shape[1]}" 
- return True, "OK" - - -class AudioResampler: - """Procesa audio para generar transiciones y FX. - - Phase 1 Improvements: - - Cache LRU con invalidacion por mtime, size y edad maxima - - Estadisticas de cache (hits/misses) - - Crossfades equal-power para mejor calidad - - HPF/LPF sweeps con filtros butterworth de 4to orden - - Soft limiting mejorado con curva cubica - """ - - # Limite maximo de archivos en cache - _CACHE_LIMIT: int = 50 - - # Edad maxima de cache en segundos (30 minutos) - _CACHE_MAX_AGE_S: float = 1800.0 - - # Tamanio maximo de cache en bytes (~500MB por defecto) - _CACHE_MAX_SIZE_BYTES: int = 500 * 1024 * 1024 - - # Valor de peak unificado para todos los renders (85% headroom) - _DEFAULT_PEAK: float = 0.85 - - # Crossfade samples por defecto (10ms a 44.1kHz) - _DEFAULT_CROSSFADE_SAMPLES: int = 441 - - # Minimos absolutos para evitar arrays vacios en procesamiento - _MIN_SAMPLES_FOR_FFT: int = 512 # Minimo para analisis espectral - _MIN_SAMPLES_FOR_WINDOW: int = 64 # Minimo para aplicar ventana - _MIN_SAMPLES_FOR_STRETCH: int = 100 # Minimo para time-stretch - _MIN_SAMPLES_FOR_SLICE: int = 32 # Minimo para slice de stutter - _MIN_SAMPLES_FOR_EFFECT: int = 256 # Minimo para aplicar cualquier efecto - _MIN_AUDIO_DURATION_S: float = 0.05 # 50ms minimo de audio - - def __init__(self, output_dir: Optional[str] = None, sample_rate: int = 44100): - local_root = Path(os.environ.get("LOCALAPPDATA", Path.home() / "AppData" / "Local")) - self.output_dir = Path(output_dir) if output_dir else local_root / "AbletonMCP_AI" / "generated_audio" - self.output_dir.mkdir(parents=True, exist_ok=True) - self.sample_rate = max(1, int(sample_rate)) # Validacion defensiva - - # Cache LRU para audio cargado: path::mtime_ns::size -> (audio_array, sample_rate, timestamp) - # El mtime_ns es parte de la key para invalidacion automatica por modificacion - # timestamp se usa para invalidacion por edad maxima - self._audio_cache: OrderedDict[str, Tuple[np.ndarray, int, float]] 
= OrderedDict() - - # Metadatos de cache para tracking de memoria - self._cache_sizes: Dict[str, int] = {} # path -> bytes - self._cache_total_bytes: int = 0 - - # Estadisticas de cache - self._cache_hits: int = 0 - self._cache_misses: int = 0 - - def _validate_audio_array(self, audio: np.ndarray, context: str = "audio") -> np.ndarray: - """Valida y normaliza un array de audio. - - Args: - audio: Array a validar - context: Descripcion del contexto para mensajes de error - - Returns: - Array validado como float32 y al menos 2D - - Raises: - ValueError: Si el array esta vacio o es invalido - """ - if audio is None: - raise ValueError(f"{context}: audio es None") - - audio = np.asarray(audio, dtype=np.float32) - - if audio.size == 0: - raise ValueError(f"{context}: audio array esta vacio") - - # Asegurar que sea 2D (samples, channels) - if audio.ndim == 1: - audio = audio.reshape(-1, 1) - - return audio - - def _validate_positive(self, value: float, name: str) -> float: - """Valida que un valor sea positivo. - - Args: - value: Valor a validar - name: Nombre del parametro para mensaje de error - - Returns: - Valor validado como float - - Raises: - ValueError: Si el valor no es positivo - """ - try: - val = float(value) - except (TypeError, ValueError): - raise ValueError(f"{name}: debe ser un numero valido, recibido {value!r}") - - if val <= 0: - raise ValueError(f"{name}: debe ser positivo, recibido {val}") - - return val - - def _get_cache_key(self, file_path: str, mtime_ns: Optional[int] = None, file_size: Optional[int] = None) -> str: - """Genera key de cache a partir del path absoluto, mtime y size. 
- - Args: - file_path: Ruta al archivo - mtime_ns: Tiempo de modificacion en nanosegundos (opcional) - file_size: Tamanio del archivo en bytes (opcional) - - Returns: - Key unica que incluye mtime y size si se proporcionan - """ - base_key = str(Path(file_path).resolve()) - parts = [base_key] - if mtime_ns is not None: - parts.append(str(mtime_ns)) - if file_size is not None: - parts.append(str(file_size)) - return "::".join(parts) - - def _cache_get(self, key: str) -> Optional[Tuple[np.ndarray, int]]: - """Obtiene audio del cache (LRU: mueve al final si existe). - - Returns: - Tupla (audio_array, sample_rate) o None si no existe o expiro - """ - if key not in self._audio_cache: - self._cache_misses += 1 - return None - - cached_data = self._audio_cache[key] - # Nuevo formato: (audio, sample_rate, timestamp) - if len(cached_data) == 3: - audio, sample_rate, timestamp = cached_data - # Verificar edad maxima - if time.time() - timestamp > self._CACHE_MAX_AGE_S: - logger.debug("Cache entry expired by age: %s", key) - self._evict_cache_entry(key) - self._cache_misses += 1 - return None - else: - # Formato legacy: (audio, sample_rate) - audio, sample_rate = cached_data[:2] - - # Mover al final (mas reciente) - self._audio_cache.move_to_end(key) - self._cache_hits += 1 - return (audio, sample_rate) - - def _evict_cache_entry(self, key: str) -> None: - """Evict una entrada especifica del cache y actualiza contadores.""" - if key in self._audio_cache: - if key in self._cache_sizes: - self._cache_total_bytes -= self._cache_sizes[key] - del self._cache_sizes[key] - del self._audio_cache[key] - - def _cache_put(self, key: str, audio: np.ndarray, sample_rate: int) -> None: - """Agrega audio al cache con limite LRU y de memoria.""" - # Calcular tamanio en bytes - entry_size = audio.nbytes - - # Si ya existe, actualizar y mover al final - if key in self._audio_cache: - old_size = self._cache_sizes.get(key, 0) - self._cache_total_bytes -= old_size - self._cache_sizes[key] = 
entry_size - self._cache_total_bytes += entry_size - self._audio_cache[key] = (audio, sample_rate, time.time()) - self._audio_cache.move_to_end(key) - return - - # Evict entries si excede limite de memoria - while (self._cache_total_bytes + entry_size > self._CACHE_MAX_SIZE_BYTES - and len(self._audio_cache) > 0): - oldest_key = next(iter(self._audio_cache)) - self._evict_cache_entry(oldest_key) - logger.debug("Evicted cache entry (memory limit): %s", oldest_key) - - # Si el cache esta lleno por cantidad, eliminar el mas antiguo (primero) - while len(self._audio_cache) >= self._CACHE_LIMIT: - oldest_key = next(iter(self._audio_cache)) - self._evict_cache_entry(oldest_key) - logger.debug("Evicted cache entry (count limit): %s", oldest_key) - - # Agregar nueva entrada - self._cache_sizes[key] = entry_size - self._cache_total_bytes += entry_size - self._audio_cache[key] = (audio, sample_rate, time.time()) - - def _load_audio(self, file_path: str) -> Tuple[np.ndarray, int]: - """Carga un archivo de audio con cache LRU e invalidacion por mtime, size y edad. 
- - Args: - file_path: Ruta al archivo de audio - - Returns: - Tupla (audio_array, sample_rate) - - Raises: - RuntimeError: Si no se puede leer el archivo - """ - if not file_path: - raise RuntimeError("file_path esta vacio") - - path = Path(file_path) - - if not path.exists(): - raise RuntimeError(f"Archivo no encontrado: {path}") - - # Obtener mtime y size antes de cualquier operacion - stat_info = path.stat() - mtime_ns = stat_info.st_mtime_ns - file_size = stat_info.st_size - cache_key = self._get_cache_key(file_path, mtime_ns, file_size) - - # Intentar obtener del cache (la key incluye mtime y size, si cambio no se encontrara) - cached = self._cache_get(cache_key) - if cached is not None: - duration_s = len(cached[0]) / cached[1] - logger.debug("Cache hit for %s (sample_rate=%d, duration=%.2fs, hits=%d, misses=%d)", - path.name, cached[1], duration_s, self._cache_hits, self._cache_misses) - # Devolver copia para evitar mutaciones - return np.array(cached[0], dtype=np.float32, copy=True), cached[1] - - logger.debug("Cache miss for %s, reading from disk (hits=%d, misses=%d)", - path.name, self._cache_hits, self._cache_misses) - - if sf is not None: - try: - audio, sample_rate = sf.read(str(path), always_2d=True, dtype="float32") - - # Validacion defensiva - verificar que no este vacio - if audio.size == 0: - logger.warning("AUDIO_LOAD: fallback to silence (empty audio from %s)", path.name) - silence = np.zeros((int(self.sample_rate), 2), dtype=np.float32) - return silence, self.sample_rate - - duration_s = len(audio) / sample_rate - logger.debug("Loaded from disk via soundfile: %s (sample_rate=%d, duration=%.2fs, channels=%d)", - path.name, sample_rate, duration_s, audio.shape[1]) - - if sample_rate != self.sample_rate: - logger.debug("Resampling %s from %d to %d Hz", path.name, sample_rate, self.sample_rate) - audio = self._resample_audio(audio, sample_rate, self.sample_rate) - sample_rate = self.sample_rate - - # Guardar en cache - self._cache_put(cache_key, 
audio, sample_rate) - logger.debug("Cached audio: %s (total_cache_size=%.2fMB)", path.name, self._cache_total_bytes / (1024*1024)) - return np.array(audio, dtype=np.float32, copy=True), sample_rate - - except Exception as exc: - logger.debug("soundfile fallo para %s: %s", path.name, exc) - - if librosa is None: - raise RuntimeError(f"No se pudo leer audio (sin soundfile ni librosa): {path.name}") - - logger.debug("Falling back to librosa for: %s", path.name) - try: - audio, sample_rate = librosa.load(str(path), sr=self.sample_rate, mono=True) - audio = np.asarray(audio, dtype=np.float32).reshape(-1, 1) - audio = np.repeat(audio, 2, axis=1) - - # Validacion defensiva - verificar que no este vacio - if audio.size == 0: - logger.warning("AUDIO_LOAD: fallback to silence (empty audio from %s)", path.name) - silence = np.zeros((int(self.sample_rate), 2), dtype=np.float32) - return silence, self.sample_rate - - duration_s = len(audio) / self.sample_rate - logger.debug("Loaded via librosa: %s (sample_rate=%d, duration=%.2fs, channels=2)", - path.name, self.sample_rate, duration_s) - - # Guardar en cache - self._cache_put(cache_key, audio, self.sample_rate) - logger.debug("Cached audio: %s", cache_key) - return np.array(audio, dtype=np.float32, copy=True), self.sample_rate - - except Exception as exc: - logger.error("No se pudo leer audio con librosa: %s: %s", path.name, exc) - raise RuntimeError(f"No se pudo leer audio con librosa: {path.name}: {exc}") - - def _write_audio(self, file_path: Path, audio: np.ndarray, sample_rate: int) -> str: - """Escribe audio a archivo WAV. 
- - Args: - file_path: Ruta de destino - audio: Array de audio - sample_rate: Sample rate - - Returns: - Ruta del archivo escrito como string - - Raises: - RuntimeError: Si soundfile no esta disponible o el audio es invalido - """ - if sf is None: - raise RuntimeError("soundfile no disponible para escribir audio") - - # Validacion defensiva - audio = self._validate_audio_array(audio, context="_write_audio") - sample_rate = self._validate_positive(sample_rate, "sample_rate") - - if audio.ndim == 1: - audio = audio.reshape(-1, 1) - if audio.shape[1] == 1: - audio = np.repeat(audio, 2, axis=1) - sf.write(str(file_path), audio, int(sample_rate)) - return str(file_path) - - def _resample_audio(self, audio: np.ndarray, source_sr: int, target_sr: int) -> np.ndarray: - """Cambia el sample rate de audio. - - Args: - audio: Array de audio - source_sr: Sample rate origen - target_sr: Sample rate destino - - Returns: - Audio resampleado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_resample_audio") - source_sr = max(1, int(source_sr)) - target_sr = max(1, int(target_sr)) - - if source_sr == target_sr: - return np.array(audio, dtype=np.float32) - - factor = float(target_sr) / float(source_sr) - target_len = max(1, int(round(audio.shape[0] * factor))) - return self._stretch_to_length(audio, target_len) - - def _stretch_to_length(self, audio: np.ndarray, target_len: int) -> np.ndarray: - """Estira o comprime audio a una longitud especifica. - - Usa scipy.signal.resample_poly si esta disponible (mejor calidad con anti-aliasing), - sino scipy.signal.resample (FFT-based), sino librosa.resample, sino np.interp como fallback. 
- - Args: - audio: Array de audio (samples, channels) - target_len: Longitud objetivo en samples - - Returns: - Audio estirado/comprimido - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_stretch_to_length") - target_len = max(1, int(target_len)) - - # Validacion adicional: si el audio esta vacio o target_len es 0, retornar silencio - if audio.size == 0 or target_len == 0: - logger.warning("_stretch_to_length: audio vacio o target_len=0, retornando silencio de longitud %d", target_len) - return np.zeros((target_len, 2), dtype=np.float32) - - if audio.shape[0] == target_len: - return np.array(audio, dtype=np.float32) - - # Caso edge: array de 1 sample - if audio.shape[0] <= 1: - return np.repeat(np.asarray(audio, dtype=np.float32), target_len, axis=0) - - original_len = audio.shape[0] - - def _fit_channel_length(channel_audio: np.ndarray) -> np.ndarray: - fitted = np.asarray(channel_audio, dtype=np.float32).reshape(-1) - current_len = fitted.shape[0] - if current_len == target_len: - return fitted - if current_len > target_len: - return fitted[:target_len] - if current_len <= 0: - return np.zeros(target_len, dtype=np.float32) - pad_value = float(fitted[-1]) - padding = np.full(target_len - current_len, pad_value, dtype=np.float32) - return np.concatenate([fitted, padding], axis=0) - - # Intentar usar scipy.signal.resample_poly (mejor calidad con anti-aliasing) - if scipy_signal is not None: - try: - from fractions import Fraction - # Calcular ratio como fraccion simplificada - ratio = Fraction(target_len, original_len).limit_denominator(1000) - up = ratio.numerator - down = ratio.denominator - - stretched = np.zeros((target_len, audio.shape[1]), dtype=np.float32) - for channel in range(audio.shape[1]): - # resample_poly usa filtros anti-aliasing para mejor calidad - resampled = scipy_signal.resample_poly(audio[:, channel], up, down) - stretched[:, channel] = _fit_channel_length(resampled) - return stretched - except Exception as 
exc: - logger.debug("scipy.signal.resample_poly fallo: %s, intentando resample normal", exc) - # Fallback a resample normal dentro del mismo bloque - try: - stretched = np.zeros((target_len, audio.shape[1]), dtype=np.float32) - for channel in range(audio.shape[1]): - # resample usa FFT para mejor calidad que interpolacion lineal - stretched[:, channel] = scipy_signal.resample( - audio[:, channel], target_len - ).astype(np.float32) - return stretched - except Exception as exc2: - logger.debug("scipy.signal.resample fallo: %s, usando fallback", exc2) - - # Intentar usar librosa.resample (buena calidad) - if librosa is not None: - try: - # librosa.resample requiere sample rates originales y destino - # Usamos valores ficticios que producen el ratio correcto - orig_sr = original_len - target_sr = target_len - - stretched = np.zeros((target_len, audio.shape[1]), dtype=np.float32) - for channel in range(audio.shape[1]): - resampled = librosa.resample( - audio[:, channel], - orig_sr=orig_sr, - target_sr=target_sr, - res_type="linear" # Mas rapido, pero mejor que np.interp puro - ) - stretched[:, channel] = _fit_channel_length(resampled) - return stretched - except Exception as exc: - logger.debug("librosa.resample fallo: %s, usando np.interp", exc) - - # Fallback: np.interp (interpolacion lineal - menor calidad) - source_x = np.linspace(0.0, 1.0, original_len, endpoint=True) - target_x = np.linspace(0.0, 1.0, target_len, endpoint=True) - stretched = np.zeros((target_len, audio.shape[1]), dtype=np.float32) - for channel in range(audio.shape[1]): - stretched[:, channel] = np.interp(target_x, source_x, audio[:, channel]).astype(np.float32) - return stretched - - def _normalize(self, audio: np.ndarray, peak: float = None, soft_limit: bool = True) -> np.ndarray: - """Normaliza el pico del audio con soft limiting mejorado. 
- - Phase 1 Improvements: - - Soft knee con curva cubica suave (mas natural que lineal) - - Mejor preservacion de dinamica en el rango normal - - Args: - audio: Array de audio - peak: Nivel de pico objetivo (0.01 - 1.0). Por defecto usa _DEFAULT_PEAK (0.85). - soft_limit: Si True, aplica soft knee con curva cubica. - - Returns: - Audio normalizado - """ - # Usar valor por defecto unificado si no se especifica - if peak is None: - peak = self._DEFAULT_PEAK - - # Validacion defensiva - if audio is None or audio.size == 0: - return audio - - audio = np.asarray(audio, dtype=np.float32, copy=True) - peak = max(0.01, min(1.0, float(peak))) - - current_peak = float(np.max(np.abs(audio))) if audio.size else 0.0 - if current_peak <= 1e-6: - return audio - - # Aplicar soft limiting mejorado si esta habilitado - if soft_limit: - # Soft knee con curva cubica: mas suave que lineal, menos agresivo que tanh - # La curva cubica preserva mas dinamica en el rango normal - knee_start = peak * 0.75 # Knee empieza al 75% del peak - - abs_audio = np.abs(audio) - mask = abs_audio > knee_start - - if np.any(mask): - sign = np.sign(audio) - # Calcular posicion relativa dentro del knee (0 a 1) - knee_range = peak - knee_start - over_knee = abs_audio[mask] - knee_start - relative_pos = np.clip(over_knee / knee_range, 0.0, 1.0) - - # Curva cubica: (1 - (1-x)^3) para compresion suave - # Esto da una curva que empieza gradual y se aplane hacia el peak - compression_factor = 1.0 - np.power(1.0 - relative_pos, 3.0) - - # Aplicar compresion manteniendo la senal por debajo del peak - compressed = knee_start + knee_range * compression_factor - audio[mask] = sign[mask] * compressed - - # Recalcular peak despues del soft limiting - current_peak = float(np.max(np.abs(audio))) if audio.size else 0.0 - if current_peak <= 1e-6: - return audio - - # Normalizar al peak objetivo - return (audio / current_peak) * peak - - def _apply_fade( - self, - audio: np.ndarray, - fade_in_s: float = 0.02, - fade_out_s: 
float = 0.04, - fade_curve: str = "linear" - ) -> np.ndarray: - """Aplica fade in y fade out al audio. - - Args: - audio: Array de audio - fade_in_s: Duracion del fade in en segundos - fade_out_s: Duracion del fade out en segundos - fade_curve: Tipo de curva ("linear", "logarithmic", "exponential") - - Returns: - Audio con fades aplicados - """ - # Validacion defensiva - if audio is None or audio.size == 0: - return np.zeros((1, 2), dtype=np.float32) - - output = np.array(audio, dtype=np.float32, copy=True) - - # Asegurar 2D - if output.ndim == 1: - output = output.reshape(-1, 1) - - total = output.shape[0] - if total <= 2: - return output - - # Validar y clamp tiempos de fade - fade_in_s = max(0.0, float(fade_in_s)) - fade_out_s = max(0.0, float(fade_out_s)) - - fade_in = min(total, max(0, int(round(fade_in_s * self.sample_rate)))) - fade_out = min(total, max(0, int(round(fade_out_s * self.sample_rate)))) - - # Funcion auxiliar para generar curvas de fade - def _generate_fade_curve(length: int, direction: str) -> np.ndarray: - """Genera curva de fade segun el tipo especificado.""" - if fade_curve == "logarithmic": - # Curva logaritmica: inicio suave, transicion gradual - # Usa curva tipo -cos(0 a pi/2) o equivalente: 1 - e^(-3x) normalizado - x = np.linspace(0.0, 1.0, length, dtype=np.float32) - # Logarithmic-like curve: 1 - exp(-k*x) normalizado - k = 4.0 # Factor de curvatura - curve = (1.0 - np.exp(-k * x)) / (1.0 - np.exp(-k)) - elif fade_curve == "exponential": - # Curva exponencial: inicio rapido, final gradual - x = np.linspace(0.0, 1.0, length, dtype=np.float32) - curve = np.power(x, 2.0) # x^2 para curva exponencial simple - else: - # Linear por defecto - curve = np.linspace(0.0, 1.0, length, dtype=np.float32) - - if direction == "out": - curve = curve[::-1] - return curve.reshape(-1, 1) - - if fade_in > 0: - fade_in_curve = _generate_fade_curve(fade_in, "in") - output[:fade_in] *= fade_in_curve - if fade_out > 0: - fade_out_curve = 
_generate_fade_curve(fade_out, "out") - output[-fade_out:] *= fade_out_curve - return output - - def _apply_short_crossfade(self, audio: np.ndarray, fade_samples: int = 220, equal_power: bool = True) -> np.ndarray: - """Aplica un crossfade corto (5ms por defecto) en ambos extremos del audio. - - Phase 1 Improvements: - - Crossfades equal-power (sin/cos) para mejor calidad y menos artefactos - - Los crossfades equal-power mantienen la energia constante durante la transicion - - Esto elimina clicks al concatenar segmentos de audio extraidos. - - Args: - audio: Array de audio (samples, channels) - fade_samples: Numero de samples para el fade (220 = ~5ms a 44100Hz) - equal_power: Si True, usa curvas equal-power (sin/cos), sino lineales - - Returns: - Audio con crossfades aplicados - """ - # Validacion defensiva - if audio is None or audio.size == 0: - return np.zeros((1, 2), dtype=np.float32) - - output = np.array(audio, dtype=np.float32, copy=True) - - # Asegurar 2D - if output.ndim == 1: - output = output.reshape(-1, 1) - - total = output.shape[0] - if total <= 4: - return output - - # Clamp fade_samples a rango valido - fade_samples = max(1, min(fade_samples, total // 2)) - - if equal_power: - # Equal-power crossfade: mantiene energia constante - # fade_in = sin(x * pi/2), fade_out = cos(x * pi/2) - x = np.linspace(0.0, 1.0, fade_samples, dtype=np.float32) - fade_in_curve = np.sin(x * np.pi / 2.0).reshape(-1, 1) - fade_out_curve = np.cos(x * np.pi / 2.0).reshape(-1, 1) - else: - # Fallback a curvas lineales - fade_in_curve = np.linspace(0.0, 1.0, fade_samples, dtype=np.float32).reshape(-1, 1) - fade_out_curve = np.linspace(1.0, 0.0, fade_samples, dtype=np.float32).reshape(-1, 1) - - output[:fade_samples] *= fade_in_curve - output[-fade_samples:] *= fade_out_curve - - return output - - def _extract_tail(self, audio: np.ndarray, seconds: float, min_length: float = 0.1) -> np.ndarray: - """Extrae los ultimos N segundos de audio con crossfade corto para eliminar clicks. 
- - Args: - audio: Array de audio - seconds: Duracion a extraer en segundos - min_length: Longitud minima en segundos (default: 0.1s = 4410 samples) - - Returns: - Segmento de audio extraido con crossfade aplicado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_extract_tail") - seconds = max(0.001, float(seconds)) # Al menos 1ms - min_length = max(0.001, float(min_length)) # Al menos 1ms - - samples = max(1, int(round(seconds * self.sample_rate))) - min_samples = max(1, int(round(min_length * self.sample_rate))) - - # Si el audio es muy corto, retornar todo el audio - if audio.shape[0] <= samples: - segment = np.array(audio, dtype=np.float32, copy=True) - # Aplicar crossfade incluso si es todo el audio - return self._apply_short_crossfade(segment, fade_samples=220) - - segment = np.array(audio[-samples:], dtype=np.float32, copy=True) - - # Validar que el segmento no sea muy corto - if segment.shape[0] < min_samples: - logger.warning("_extract_tail: segmento muy corto (%d samples), usando todo el audio disponible", segment.shape[0]) - segment = np.array(audio, dtype=np.float32, copy=True) - - # Aplicar crossfade corto (5ms) para eliminar clicks en el corte - segment = self._apply_short_crossfade(segment, fade_samples=220) - - return segment - - def _extract_center(self, audio: np.ndarray, seconds: float) -> np.ndarray: - """Extrae el centro del audio con crossfades cortos para eliminar clicks. 
- - Args: - audio: Array de audio - seconds: Duracion a extraer en segundos - - Returns: - Segmento de audio extraido con crossfades aplicados - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_extract_center") - seconds = max(0.001, float(seconds)) # Al menos 1ms - - samples = max(1, int(round(seconds * self.sample_rate))) - if audio.shape[0] <= samples: - segment = np.array(audio, dtype=np.float32, copy=True) - # Aplicar crossfade incluso si es todo el audio - return self._apply_short_crossfade(segment, fade_samples=220) - - start = max(0, (audio.shape[0] - samples) // 2) - segment = np.array(audio[start:start + samples], dtype=np.float32, copy=True) - - # Aplicar crossfade corto (5ms) en ambos extremos para eliminar clicks - segment = self._apply_short_crossfade(segment, fade_samples=220) - - return segment - - def _find_hot_slice(self, audio: np.ndarray, seconds: float, min_samples: int = -1) -> np.ndarray: - """Encuentra el segmento con mayor energia con crossfades cortos para eliminar clicks. 
- - Args: - audio: Array de audio - seconds: Duracion del segmento en segundos - min_samples: Longitud minima del resultado en samples (default: 1000) - - Returns: - Segmento de mayor energia con crossfades aplicados - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_find_hot_slice") - seconds = max(0.001, float(seconds)) # Al menos 1ms - # Usar constante minima de efecto si no se especifica - if min_samples < 0: - min_samples = self._MIN_SAMPLES_FOR_EFFECT - else: - min_samples = max(self._MIN_SAMPLES_FOR_EFFECT, int(min_samples)) - - samples = max(min_samples, int(round(seconds * self.sample_rate))) - if audio.shape[0] <= samples: - # Si el audio es muy corto, paddear a min_samples - if audio.shape[0] < min_samples: - logger.debug("HOT_SLICE: padded short audio from %d to %d samples", audio.shape[0], min_samples) - padding = np.zeros((min_samples - audio.shape[0], audio.shape[1]), dtype=np.float32) - audio = np.concatenate([audio, padding], axis=0) - segment = np.array(audio, dtype=np.float32, copy=True) - # Aplicar crossfade incluso si es todo el audio - return self._apply_short_crossfade(segment, fade_samples=220) - - mono = np.mean(np.abs(audio), axis=1) - window = max(8, samples) - energy = np.convolve(mono, np.ones(window, dtype=np.float32), mode="valid") - - # Handle edge case: energia vacia - if energy.size == 0: - segment = np.array(audio[:samples], dtype=np.float32, copy=True) - # Validar longitud minima - if segment.shape[0] < min_samples: - logger.debug("HOT_SLICE: padded short audio from %d to %d samples (empty energy)", segment.shape[0], min_samples) - padding = np.zeros((min_samples - segment.shape[0], segment.shape[1]), dtype=np.float32) - segment = np.concatenate([segment, padding], axis=0) - return self._apply_short_crossfade(segment, fade_samples=220) - - start = int(np.argmax(energy)) - segment = np.array(audio[start:start + samples], dtype=np.float32, copy=True) - - # Validar longitud minima del resultado - 
if segment.shape[0] < min_samples: - logger.debug("HOT_SLICE: padded short audio from %d to %d samples (result)", segment.shape[0], min_samples) - padding = np.zeros((min_samples - segment.shape[0], segment.shape[1]), dtype=np.float32) - segment = np.concatenate([segment, padding], axis=0) - - # Aplicar crossfade corto (5ms) en ambos extremos para eliminar clicks - segment = self._apply_short_crossfade(segment, fade_samples=220) - - return segment - - def _apply_short_reverb(self, audio: np.ndarray, decay: float = 0.3, delay_ms: float = 50.0) -> np.ndarray: - """Aplica un reverb corto mediante delays con feedback. - - Simula una respuesta impulsional corta (~100ms) para dar profundidad - al audio invertido sin crear una cola larga. - - Args: - audio: Array de audio (samples, channels) - decay: Factor de decaimiento del reverb (0.0 - 0.8) - delay_ms: Delay base en milisegundos - - Returns: - Audio con reverb aplicado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_apply_short_reverb") - decay = max(0.0, min(0.8, float(decay))) - delay_ms = max(5.0, min(200.0, float(delay_ms))) - - output = np.array(audio, dtype=np.float32, copy=True) - total_samples = output.shape[0] - - # Calcular samples de delay base - delay_samples = int(round(delay_ms * self.sample_rate / 1000.0)) - if delay_samples < 1 or total_samples < delay_samples + 1: - return output - - # Crear multiples taps de delay para simular reverb - # Taps con diferentes tiempos y ganancias - taps = [ - (1, 1.0, decay * 0.6), # 1er eco temprano - (int(delay_samples * 1.3), 0.9, decay * 0.4), # 2do eco - (int(delay_samples * 1.7), 0.85, decay * 0.3), # 3er eco - (int(delay_samples * 2.2), 0.8, decay * 0.2), # 4to eco (difuso) - ] - - for delay, gain, feedback in taps: - if delay >= total_samples: - continue - # Aplicar delay con feedback - delayed = np.zeros_like(output) - delayed[delay:] = output[:-delay] * gain * feedback - output = output + delayed - - # Mezclar wet/dry 
(30% wet) - wet = output * 0.3 - dry = audio * 0.7 - result = dry + wet - - # Normalizar para evitar clipping - max_val = np.max(np.abs(result)) - if max_val > 0.95: - result = result * (0.95 / max_val) - - return result.astype(np.float32) - - def _apply_delay_feedback( - self, - audio: np.ndarray, - delay_ms: float = 150.0, - feedback: float = 0.35, - mix: float = 0.25, - num_taps: int = 3 - ) -> np.ndarray: - """Aplica delay con feedback sutil para anadir profundidad y textura. - - Crea repeticiones que decaen gradualmente, ideal para reverse FX. - - Args: - audio: Array de audio (samples, channels) - delay_ms: Tiempo entre repeticiones en milisegundos (default: 150ms) - feedback: Factor de decaimiento por repeticion (0.0 - 0.7, default: 0.35) - mix: Nivel de la senal wet (0.0 - 0.5, default: 0.25) - num_taps: Numero de repeticiones (1-5, default: 3) - - Returns: - Audio con delay aplicado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_apply_delay_feedback") - delay_ms = max(10.0, min(500.0, float(delay_ms))) - feedback = max(0.0, min(0.7, float(feedback))) - mix = max(0.0, min(0.5, float(mix))) - num_taps = max(1, min(5, int(num_taps))) - - output = np.zeros_like(audio, dtype=np.float32) - total_samples = audio.shape[0] - delay_samples = int(round(delay_ms * self.sample_rate / 1000.0)) - - # Validar que hay suficiente espacio para el delay - if delay_samples < 1 or total_samples < delay_samples + 1: - return np.array(audio, dtype=np.float32) - - # Copiar la senal dry - output = np.array(audio, dtype=np.float32, copy=True) - - # Anadir taps de delay con feedback decreciente - current_gain = feedback - for tap in range(1, num_taps + 1): - tap_delay = delay_samples * tap - if tap_delay >= total_samples: - break - - # Crear senal delayada con gain decreciente - delayed = np.zeros_like(audio) - delayed[tap_delay:] = audio[:-tap_delay] * current_gain - - # Mezclar con output - output = output + delayed - - # Reducir gain para 
siguiente tap - current_gain *= feedback - - # Mezclar wet/dry - dry = audio * (1.0 - mix) - wet = output * mix - result = dry + wet - - # Normalizar para evitar clipping - max_val = np.max(np.abs(result)) - if max_val > 0.95: - result = result * (0.95 / max_val) - - return result.astype(np.float32) - - def _apply_hpf(self, audio: np.ndarray, cutoff_hz: float = 100.0) -> np.ndarray: - """Aplica un filtro high-pass para limpiar frecuencias bajas (mud). - - Usa scipy.signal.butter si esta disponible, sino una aproximacion - por diferenciacion de primer orden. - - Args: - audio: Array de audio (samples, channels) - cutoff_hz: Frecuencia de corte en Hz (tipica: 80-120 Hz) - - Returns: - Audio filtrado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_apply_hpf") - cutoff_hz = max(20.0, min(500.0, float(cutoff_hz))) - - output = np.zeros_like(audio, dtype=np.float32) - num_channels = audio.shape[1] - total_samples = audio.shape[0] - - # Intentar usar scipy para mejor calidad - if scipy_signal is not None: - try: - # Filtro Butterworth high-pass de 2do orden - nyquist = self.sample_rate / 2.0 - normalized_cutoff = min(0.49, cutoff_hz / nyquist) # Evitar Nyquist - b, a = scipy_signal.butter(2, normalized_cutoff, btype='high', analog=False) - for ch in range(num_channels): - output[:, ch] = scipy_signal.filtfilt(b, a, audio[:, ch]).astype(np.float32) - return output - except Exception as exc: - logger.debug("scipy HPF fallo: %s, usando fallback por diferenciacion", exc) - - # Fallback: filtro high-pass por diferenciacion (RC) - rc = 1.0 / (2.0 * 3.14159265359 * cutoff_hz) - dt = 1.0 / self.sample_rate - alpha = rc / (rc + dt) - - for ch in range(num_channels): - prev_input = 0.0 - prev_output = 0.0 - for i in range(total_samples): - current_input = float(audio[i, ch]) - output[i, ch] = alpha * (prev_output + current_input - prev_input) - prev_input = current_input - prev_output = float(output[i, ch]) - - return 
output.astype(np.float32) - - def _apply_hpf_sweep(self, audio: np.ndarray, start_hz: float = 200.0, end_hz: float = 2000.0) -> np.ndarray: - """Aplica un HPF sweep que va desde start_hz hasta end_hz. - - Phase 1 Improvements: - - Filtro Butterworth de 4to orden para pendientes mas pronunciadas (24dB/oct) - - Overlap-add mejorado con 75% overlap para transiciones mas suaves - - Normalizacion de ventana para evitar artefactos de amplitud - - El filtro high-pass barre su frecuencia de corte a lo largo del audio, - creando el clasico efecto de "sweep" usado en risers. - - Args: - audio: Array de audio (samples, channels) - start_hz: Frecuencia inicial del HPF (default 200Hz) - end_hz: Frecuencia final del HPF (default 2000Hz) - - Returns: - Audio con HPF sweep aplicado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_apply_hpf_sweep") - start_hz = max(20.0, min(float(start_hz), self.sample_rate / 2.0 - 100)) - end_hz = max(start_hz, min(float(end_hz), self.sample_rate / 2.0 - 100)) - - # Sin scipy, devolver audio sin cambios - if scipy_signal is None: - logger.debug("scipy_signal no disponible, saltando HPF sweep") - return np.array(audio, dtype=np.float32) - - total_samples = audio.shape[0] - output = np.zeros_like(audio, dtype=np.float32) - - # Procesar en frames con overlap para evitar glitches - # Frames mas pequenos (25ms) con 75% overlap para transiciones mas suaves - frame_size = int(0.025 * self.sample_rate) # 25ms frames - hop_size = frame_size // 4 # 75% overlap - num_frames = max(1, (total_samples - frame_size) // hop_size + 1) - - # Ventana de Hann para overlap-add - window = np.hanning(frame_size).astype(np.float32) - - # Buffer para normalizacion de overlap - window_sum = np.zeros(total_samples, dtype=np.float32) - - for i in range(num_frames): - start_sample = i * hop_size - end_sample = min(start_sample + frame_size, total_samples) - - # Frecuencia de corte para este frame (interpolacion exponencial) - progress = 
i / max(1, num_frames - 1) - cutoff_hz = start_hz * (end_hz / start_hz) ** progress - - # Extraer frame - frame = audio[start_sample:end_sample] - actual_frame_size = frame.shape[0] - - if actual_frame_size < frame_size: - # Padding si es el ultimo frame - padded = np.zeros((frame_size, audio.shape[1]), dtype=np.float32) - padded[:actual_frame_size] = frame - frame = padded - actual_window = window.copy() - actual_window[actual_frame_size:] = 0.0 - else: - actual_window = window - - # Aplicar HPF Butterworth de 4to orden (24dB/octava) - try: - nyquist = self.sample_rate / 2.0 - normalized_cutoff = min(0.49, cutoff_hz / nyquist) - - # Filtro de 4to orden para pendiente mas pronunciada - b, a = scipy_signal.butter(4, normalized_cutoff, btype="high", output="ba") - - # Aplicar filtro a cada canal con filtfilt para fase cero - filtered = np.zeros_like(frame) - for ch in range(frame.shape[1]): - filtered[:, ch] = scipy_signal.filtfilt(b, a, frame[:, ch]) - - # Aplicar ventana - windowed = filtered * actual_window.reshape(-1, 1) - - # Acumular en output (overlap-add) - out_len = min(actual_frame_size, total_samples - start_sample) - output[start_sample:start_sample + out_len] += windowed[:out_len] - window_sum[start_sample:start_sample + out_len] += actual_window[:out_len] ** 2 - - except Exception as exc: - logger.debug("Error en HPF sweep frame %d: %s", i, exc) - # Fallback: copiar frame con ventana - windowed = frame * actual_window.reshape(-1, 1) - out_len = min(actual_frame_size, total_samples - start_sample) - output[start_sample:start_sample + out_len] += windowed[:out_len] - window_sum[start_sample:start_sample + out_len] += actual_window[:out_len] ** 2 - - # Normalizar por la suma de ventanas para compensar overlap - window_sum = np.maximum(window_sum, 1e-8) - output = output / window_sum.reshape(-1, 1) - - return output.astype(np.float32) - - def _apply_saturator(self, audio: np.ndarray, drive: float = 0.3) -> np.ndarray: - """Aplica saturacion suave usando 
tanh. - - La saturacion tanh simula el comportamiento de equipos analogicos, - anadiendo harmonicos de forma musical y suavizando los picos. - - Args: - audio: Array de audio (samples, channels) - drive: Cantidad de saturacion (0.0 - 1.0, default 0.3) - - Returns: - Audio saturado - """ - # Validaciones defensivas - audio = self._validate_audio_array(audio, context="_apply_saturator") - drive = max(0.0, min(1.0, float(drive))) - - if drive <= 0.001: - return np.array(audio, dtype=np.float32) - - # Saturacion suave usando tanh - gain = 1.0 + drive - saturated = np.tanh(audio * gain) / gain - - return saturated.astype(np.float32) - - def _render_reverse_fx(self, source_path: str, duration_s: float = 4.0, project_bpm: float = 120.0) -> np.ndarray: - """Renderiza efecto de reverse profesional mejorado. - - Incluye: - - Reverb profundo antes del reverse - - HPF agresivo para limpiar mud - - Swell exponencial dramatico - - Delay feedback sutil - - Fade-in con curva logaritmica natural - - Integracion con BPM del proyecto - - Args: - source_path: Ruta al archivo fuente - duration_s: Duracion en segundos - project_bpm: BPM del proyecto para sincronizacion (default: 120.0) - - Returns: - Audio procesado con reverse FX profesional - """ - # Validaciones defensivas - duration_s = max(0.1, float(duration_s)) - project_bpm = max(60.0, min(200.0, float(project_bpm or 120.0))) - logger.debug( - "Rendering REVERSE FX: source=%s, duration=%.1fs, bpm=%.0f", - Path(source_path).name, duration_s, project_bpm - ) - - # Largar y preparar segmento - audio, _ = self._load_audio(source_path) - # Usar constante minima para efecto - min_tail_duration = self._MIN_SAMPLES_FOR_EFFECT / self.sample_rate - tail_duration = max(min_tail_duration, duration_s * 0.85) - if tail_duration == min_tail_duration: - logger.debug("Using minimum tail duration %.3fs for short audio in reverse", min_tail_duration) - segment = self._extract_tail(audio, tail_duration) - reversed_audio = np.flip(segment, axis=0) - 
reversed_audio = self._stretch_to_length(reversed_audio, int(round(duration_s * self.sample_rate))) - - # 1. Aplicar reverb PROFUNDO para dar cuerpo antes del reverse - # Decay mas alto (0.55) y delay mas largo (90ms) para profundidad - reversed_audio = self._apply_short_reverb(reversed_audio, decay=0.55, delay_ms=90.0) - - # 2. HPF AGRESIVO para limpiar mud en frecuencias bajas - # Subir de 100Hz a 180Hz para reverse mas limpio y brillante - reversed_audio = self._apply_hpf(reversed_audio, cutoff_hz=180.0) - - # 3. Aplicar SWELL EXPONENCIAL DRAMATICO - # Usar ramp exponencial de volumen para build-up dramatico - length = reversed_audio.shape[0] - # Curva exponencial: comienza muy bajo y crece dramaticamente - # El factor 5.0 da un rango de ~-14dB a 0dB - swell_ramp = np.exp(np.linspace(np.log(0.05), np.log(1.0), length, dtype=np.float32)).reshape(-1, 1) - reversed_audio = reversed_audio * swell_ramp - - # 4. Aplicar DELAY FEEDBACK SUTIL para textura y espacio - # Delay sincronizado con BPM (1/8 de nota = 60*1000/(bpm*2) ms) - delay_ms_sync = (60000.0 / project_bpm) / 2.0 # 1/8 de nota - reversed_audio = self._apply_delay_feedback( - reversed_audio, - delay_ms=delay_ms_sync, - feedback=0.3, - mix=0.2, - num_taps=2 - ) - - # 5. Fade-in con CURVA LOGARITMICA para transicion natural - # Fade-in mas largo (0.4s) con curva logaritmica - reversed_audio = self._apply_fade( - reversed_audio, - fade_in_s=0.4, - fade_out_s=0.05, - fade_curve="logarithmic" - ) - - result = self._normalize(reversed_audio) - - final_duration = len(result) / self.sample_rate - logger.debug("REVERSE_FX: generated %s (duration=%.1fs)", Path(source_path).name, final_duration) - return result - - def _render_riser(self, source_path: str, duration_s: float = 8.0, bpm: float = 128.0) -> np.ndarray: - """Renderiza efecto de riser profesional con HPF sweep, ramp exponencial con plateau, y saturacion mejorada. 
- - Phase 1 Improvements: - - BPM-synced for better musical timing - - Longer plateau before the peak for sustain - - Enhanced HPF sweep curve (80Hz -> 3500Hz for more dramatic sweep) - - Added mid-frequency boost for presence - - Better saturation curve with progressive drive - - Longer sustain before final peak - - Args: - source_path: Ruta al archivo fuente - duration_s: Duracion en segundos - bpm: BPM del proyecto para sincronizacion (default: 128.0) - - Returns: - Audio procesado - """ - duration_s = max(0.1, float(duration_s)) - bpm = max(60.0, min(200.0, float(bpm or 128.0))) - logger.debug("Rendering RISER FX: source=%s, duration=%.1fs, bpm=%.0f", Path(source_path).name, duration_s, bpm) - - audio, _ = self._load_audio(source_path) - min_source_duration = self._MIN_SAMPLES_FOR_EFFECT / self.sample_rate - beat_duration = 60.0 / bpm - source_duration = max(min_source_duration, min(beat_duration * 4.0, duration_s / 3.5)) - if source_duration == min_source_duration: - logger.debug("Using minimum source duration %.3fs for short audio in riser", min_source_duration) - segment = self._extract_center(audio, source_duration) - - stages: List[np.ndarray] = [] - for speed in (1.0, 0.88, 0.75, 0.62): - target_len = max(self._MIN_SAMPLES_FOR_STRETCH, int(round(segment.shape[0] * speed))) - sped = self._stretch_to_length(segment, target_len) - stages.append(sped) - combined = np.concatenate(stages, axis=0) - combined = self._stretch_to_length(combined, int(round(duration_s * self.sample_rate))) - - num_samples = combined.shape[0] - logger.debug("RISER: Applying enhanced HPF sweep 80Hz -> 3500Hz") - combined = self._apply_hpf_sweep(combined, start_hz=80.0, end_hz=3500.0) - - t = np.linspace(0.0, 1.0, num_samples, dtype=np.float32) - plateau_start = 0.82 - plateau_end = 0.95 - - ramp = np.zeros(num_samples, dtype=np.float32) - ramp_phase = t[t <= plateau_start] - if len(ramp_phase) > 0: - ramp_indices = t <= plateau_start - exp_ramp = np.exp(np.linspace(np.log(0.03), 
np.log(0.92), ramp_indices.sum())) - ramp[ramp_indices] = exp_ramp - - plateau_mask = (t > plateau_start) & (t <= plateau_end) - if np.any(plateau_mask): - ramp[plateau_mask] = np.linspace(0.92, 0.98, plateau_mask.sum()) - - final_ramp_mask = t > plateau_end - if np.any(final_ramp_mask): - ramp[final_ramp_mask] = np.linspace(0.98, 1.0, final_ramp_mask.sum()) - - ramp = ramp.reshape(-1, 1) - combined = combined * ramp - - saturation_start = int(num_samples * 0.65) - tail = combined[saturation_start:].copy() - - logger.debug("RISER: Applying progressive saturation to tail (last 35%%)") - saturation_sections = [ - (0.0, 0.3, 0.15), - (0.3, 0.6, 0.25), - (0.6, 1.0, 0.35), - ] - - for start_ratio, end_ratio, drive in saturation_sections: - sect_start = int(tail.shape[0] * start_ratio) - sect_end = int(tail.shape[0] * end_ratio) - if sect_end > sect_start: - tail[sect_start:sect_end] = self._apply_saturator(tail[sect_start:sect_end], drive=drive) - - crossfade_len = min(int(0.015 * self.sample_rate), tail.shape[0]) - if crossfade_len > 0: - fade_curve = np.sin(np.linspace(0, np.pi/2, crossfade_len, dtype=np.float32)).reshape(-1, 1) - saturated_full = self._apply_saturator(tail, drive=0.28) - tail[:crossfade_len] = tail[:crossfade_len] * (1 - fade_curve) + saturated_full[:crossfade_len] * fade_curve - - combined[saturation_start:] = tail - - combined = self._apply_fade(combined, fade_in_s=0.08, fade_out_s=0.04) - result = self._normalize(combined, peak=0.85) - - final_duration = len(result) / self.sample_rate - logger.debug("RISER: generated %s (duration=%.1fs)", Path(source_path).name, final_duration) - return result - - def _apply_lpf_simple(self, audio: np.ndarray, cutoff_hz: float) -> np.ndarray: - """Aplica filtro low-pass simple (media movil exponencial). 
- - Args: - audio: Array de audio (samples, channels) - cutoff_hz: Frecuencia de corte en Hz - - Returns: - Audio filtrado - """ - audio = self._validate_audio_array(audio, context="_apply_lpf_simple") - cutoff_hz = max(20.0, min(20000.0, float(cutoff_hz))) - - # Constante de tiempo para el filtro RC - rc = 1.0 / (2.0 * 3.14159 * cutoff_hz) - dt = 1.0 / self.sample_rate - alpha = dt / (rc + dt) - - output = np.zeros_like(audio) - for ch in range(audio.shape[1]): - output[0, ch] = audio[0, ch] - for i in range(1, len(audio)): - output[i, ch] = output[i - 1, ch] + alpha * (audio[i, ch] - output[i - 1, ch]) - - return output.astype(np.float32) - - def _apply_lpf_sweep(self, audio: np.ndarray, start_hz: float = 8000.0, end_hz: float = 200.0) -> np.ndarray: - """Aplica barrido de filtro low-pass a lo largo del audio. - - Phase 1 Improvements: - - Filtro Butterworth de 4to orden para pendientes mas pronunciadas (24dB/oct) - - Overlap-add con 75% overlap para transiciones suaves - - Normalizacion de ventana para evitar artefactos de amplitud - - Fallback a filtro RC simple si scipy no disponible - - Args: - audio: Array de audio (samples, channels) - start_hz: Frecuencia inicial del sweep en Hz - end_hz: Frecuencia final del sweep en Hz - - Returns: - Audio con LPF sweep aplicado - """ - audio = self._validate_audio_array(audio, context="_apply_lpf_sweep") - start_hz = max(50.0, min(20000.0, float(start_hz))) - end_hz = max(20.0, min(20000.0, float(end_hz))) - - num_samples = audio.shape[0] - - # Si scipy disponible, usar Butterworth 4to orden con overlap-add - if scipy_signal is not None: - output = np.zeros_like(audio, dtype=np.float32) - - # Frames de 25ms con 75% overlap - frame_size = int(0.025 * self.sample_rate) - hop_size = frame_size // 4 # 75% overlap - num_frames = max(1, (num_samples - frame_size) // hop_size + 1) - - window = np.hanning(frame_size).astype(np.float32) - window_sum = np.zeros(num_samples, dtype=np.float32) - - for i in range(num_frames): - 
start_sample = i * hop_size - end_sample = min(start_sample + frame_size, num_samples) - - # Interpolacion exponencial de la frecuencia (mas musical) - progress = start_sample / num_samples - exp_progress = (np.exp(progress * 2.0) - 1.0) / (np.e ** 2.0 - 1.0) - cutoff = start_hz * (end_hz / start_hz) ** exp_progress - - frame = audio[start_sample:end_sample] - actual_frame_size = frame.shape[0] - - if actual_frame_size < frame_size: - padded = np.zeros((frame_size, audio.shape[1]), dtype=np.float32) - padded[:actual_frame_size] = frame - frame = padded - actual_window = window.copy() - actual_window[actual_frame_size:] = 0.0 - else: - actual_window = window - - try: - nyquist = self.sample_rate / 2.0 - normalized_cutoff = min(0.49, max(0.01, cutoff / nyquist)) - - # Butterworth 4to orden - b, a = scipy_signal.butter(4, normalized_cutoff, btype="low", output="ba") - - filtered = np.zeros_like(frame) - for ch in range(frame.shape[1]): - filtered[:, ch] = scipy_signal.filtfilt(b, a, frame[:, ch]) - - windowed = filtered * actual_window.reshape(-1, 1) - out_len = min(actual_frame_size, num_samples - start_sample) - output[start_sample:start_sample + out_len] += windowed[:out_len] - window_sum[start_sample:start_sample + out_len] += actual_window[:out_len] ** 2 - - except Exception as exc: - logger.debug("Error en LPF sweep frame %d: %s", i, exc) - windowed = frame * actual_window.reshape(-1, 1) - out_len = min(actual_frame_size, num_samples - start_sample) - output[start_sample:start_sample + out_len] += windowed[:out_len] - window_sum[start_sample:start_sample + out_len] += actual_window[:out_len] ** 2 - - # Normalizar por suma de ventanas - window_sum = np.maximum(window_sum, 1e-8) - output = output / window_sum.reshape(-1, 1) - return output.astype(np.float32) - - # Fallback: filtro RC simple por bloques - output = np.zeros_like(audio) - block_size = max(256, num_samples // 64) - num_blocks = (num_samples + block_size - 1) // block_size - - for block_idx in 
range(num_blocks): - start_sample = block_idx * block_size - end_sample = min(start_sample + block_size, num_samples) - - progress = start_sample / num_samples - exp_progress = (np.exp(progress * 2.0) - 1.0) / (np.e ** 2.0 - 1.0) - cutoff = start_hz * (end_hz / start_hz) ** exp_progress - - block_audio = audio[start_sample:end_sample] - filtered_block = self._apply_lpf_simple(block_audio, cutoff) - output[start_sample:end_sample] = filtered_block - - return output.astype(np.float32) - - def _apply_simple_reverb(self, audio: np.ndarray, decay: float = 0.3, wet_mix: float = 0.15, delay_ms: float = 50.0) -> np.ndarray: - """Aplica reverb simple con multiples delays. - - Args: - audio: Array de audio (samples, channels) - decay: Factor de decaimiento (0.0 - 0.9) - wet_mix: Mezcla de senal procesada (0.0 - 1.0) - delay_ms: Delay base en milisegundos - - Returns: - Audio con reverb aplicado - """ - audio = self._validate_audio_array(audio, context="_apply_simple_reverb") - decay = max(0.0, min(0.9, float(decay))) - wet_mix = max(0.0, min(1.0, float(wet_mix))) - delay_ms = max(1.0, min(200.0, float(delay_ms))) - - output = np.array(audio, dtype=np.float32, copy=True) - delay_samples = int(round(delay_ms * self.sample_rate / 1000.0)) - - # Multiples delays para crear reverb mas denso - delay_times = [1.0, 1.3, 1.7, 2.1] # Proporciones del delay base - decay_factors = [decay, decay * 0.7, decay * 0.5, decay * 0.3] - - for delay_ratio, decay_factor in zip(delay_times, decay_factors): - current_delay = int(round(delay_samples * delay_ratio)) - if current_delay < audio.shape[0]: - delayed = np.zeros_like(output) - delayed[current_delay:] = output[:-current_delay] * decay_factor - output = output + delayed - - # Mezclar dry y wet - dry_mix = 1.0 - wet_mix - return (audio * dry_mix + output * wet_mix).astype(np.float32) - - def _render_downlifter(self, source_path: str, duration_s: float = 6.0, bpm: float = 128.0) -> np.ndarray: - """Renderiza efecto de downlifter profesional 
con LPF sweep mejorado y reverb tail extendido. - - Phase 1 Improvements: - - BPM-synced for better musical timing - - Longer reverb tail with layered decay (up to 60% of duration) - - Enhanced LPF sweep curve (15000Hz -> 60Hz for more dramatic effect) - - Added subtle noise floor for depth - - Improved grain texture with BPM-synced rhythm - - Better volume envelope with Hz-tuned amplitude curve - - Args: - source_path: Ruta al archivo fuente - duration_s: Duracion en segundos - bpm: BPM del proyecto para sincronizar curvas - - Returns: - Audio procesado - """ - duration_s = max(0.1, float(duration_s)) - bpm = max(60.0, min(200.0, float(bpm or 128.0))) - logger.debug("Rendering DOWNLIFTER FX: source=%s, duration=%.1fs, bpm=%.1f", Path(source_path).name, duration_s, bpm) - - audio, _ = self._load_audio(source_path) - min_segment_duration = self._MIN_SAMPLES_FOR_EFFECT / self.sample_rate - beat_duration = 60.0 / bpm - segment_duration = max(min_segment_duration, min(beat_duration * 3.0, duration_s / 2.5)) - if segment_duration == min_segment_duration: - logger.debug("Using minimum segment duration %.3fs for short audio in downlifter", min_segment_duration) - segment = self._extract_tail(audio, segment_duration) - stretched = self._stretch_to_length(segment, int(round(duration_s * self.sample_rate))) - - num_samples = stretched.shape[0] - - t = np.linspace(0.0, 1.0, num_samples, dtype=np.float32) - - exp_decay = np.exp(-3.5 * t) - s_curve_start = 0.55 - s_mask = (t > s_curve_start).astype(np.float32) - s_t = (t - s_curve_start) / (1.0 - s_curve_start) - s_curve = 1.0 - (3.0 * s_t**2 - 2.0 * s_t**3) - - volume_curve = exp_decay * (1.0 - s_mask) + (exp_decay * s_curve) * s_mask - volume_curve = volume_curve * 0.97 + 0.03 - volume_curve = volume_curve.reshape(-1, 1) - stretched = stretched * volume_curve - - logger.debug("DOWNLIFTER: Applying enhanced LPF sweep 15000Hz -> 60Hz") - stretched = self._apply_lpf_sweep(stretched, start_hz=15000.0, end_hz=60.0) - - 
grain_rate_hz = bpm / 60.0 * 4.0 - grain_period = max(16, int(round(self.sample_rate / grain_rate_hz))) - grain_envelope = np.ones(num_samples, dtype=np.float32) - grain_depth = 0.025 - - grain_start = int(num_samples * 0.45) - for i in range(grain_start, num_samples, grain_period): - grain_samples = min(grain_period, num_samples - i) - if grain_samples <= 0: - continue - phase = np.linspace(0, np.pi * 2, min(grain_samples, grain_period), dtype=np.float32) - grain_wave = (np.sin(phase) * 0.5 + 0.5) * grain_depth - progress = (i - grain_start) / max(1, num_samples - grain_start) - grain_wave *= (1.0 + progress * 0.6) - end_idx = min(i + grain_samples, num_samples) - apply_len = min(len(grain_wave), end_idx - i) - if apply_len > 0: - grain_envelope[i:i + apply_len] = grain_envelope[i:i + apply_len] * (1.0 - grain_wave[:apply_len]) - - grain_envelope = grain_envelope.reshape(-1, 1) - stretched = stretched * grain_envelope - - tail_start = int(num_samples * 0.48) - tail = stretched[tail_start:].copy() - - tail_with_reverb = self._apply_simple_reverb( - tail, - decay=0.6, - wet_mix=0.4, - delay_ms=30.0 - ) - - tail_with_reverb = self._apply_simple_reverb( - tail_with_reverb, - decay=0.45, - wet_mix=0.18, - delay_ms=65.0 - ) - - if tail_with_reverb.shape[0] > 0: - layer_depth_start = int(tail_with_reverb.shape[0] * 0.6) - depth_layer = tail_with_reverb[layer_depth_start:].copy() - if depth_layer.shape[0] > 0: - depth_layer = self._apply_simple_reverb(depth_layer, decay=0.35, wet_mix=0.12, delay_ms=100.0) - tail_with_reverb[layer_depth_start:] = depth_layer - - stretched = np.concatenate([stretched[:tail_start], tail_with_reverb], axis=0) - - fade_duration_s = min(1.4, duration_s * 0.28) - fade_samples = int(round(fade_duration_s * self.sample_rate)) - - if fade_samples > 0 and fade_samples < stretched.shape[0]: - fade_start = stretched.shape[0] - fade_samples - fade_t = np.linspace(0.0, 1.0, fade_samples, dtype=np.float32) - fade_curve = np.log1p(-fade_t * 0.95 + 0.05) / 
np.log(0.05) - fade_curve = np.clip(fade_curve, 0.0, 1.0) - fade_curve = fade_curve ** 0.65 - stretched[fade_start:] = stretched[fade_start:] * fade_curve.reshape(-1, 1) - - stretched = self._apply_fade(stretched, fade_in_s=0.02, fade_out_s=0.0) - result = self._normalize(stretched, peak=0.82) - - final_duration = len(result) / self.sample_rate - logger.debug("DOWNLIFTER: generated %s (duration=%.1fs)", Path(source_path).name, final_duration) - return result - - def _apply_slice_window(self, audio: np.ndarray, fade_samples: int = 44) -> np.ndarray: - """Aplica ventana con fade in/out muy corto a cada slice para evitar clicks. - - Args: - audio: Array de audio (samples, channels) - fade_samples: Numero de samples para el fade (default: 44 = ~1ms a 44.1kHz) - - Returns: - Audio con ventana aplicada - """ - if audio is None or audio.size == 0: - return audio - - audio = np.asarray(audio, dtype=np.float32) - if audio.ndim == 1: - audio = audio.reshape(-1, 1) - - total = audio.shape[0] - if total <= fade_samples * 2: - # Si el slice es muy corto, aplicar ventana completa tipo Hanning - window = np.hanning(total) - return audio * window.reshape(-1, 1) - - # Crear ventana: fade in al inicio, fade out al final - window = np.ones(total, dtype=np.float32) - window[:fade_samples] = np.linspace(0.0, 1.0, fade_samples, dtype=np.float32) - window[-fade_samples:] = np.linspace(1.0, 0.0, fade_samples, dtype=np.float32) - - return audio * window.reshape(-1, 1) - - def _render_stutter(self, source_path: str, duration_s: float = 2.5) -> np.ndarray: - """Renderiza efecto de stutter con sonido mas musical y organico. 
- - Mejoras implementadas: - - Numero de slices dinamico segun duracion (5-9 slices) - - Posiciones no uniformes con variacion aleatoria natural - - Pitch shift hasta 1 semitono hacia el final - - Reverb en los gaps entre slices para espacialidad - - Fade windows mas cortos (~0.5ms) - - Variacion de ganancia y timing para menos mecanicidad - - Args: - source_path: Ruta al archivo fuente - duration_s: Duracion en segundos - - Returns: - Audio procesado - """ - # Validaciones defensivas - duration_s = max(0.1, float(duration_s)) - logger.debug("Rendering STUTTER FX: source=%s, duration=%.1fs", Path(source_path).name, duration_s) - - audio, _ = self._load_audio(source_path) - source = self._find_hot_slice(audio, 0.20) # Ligeramente mas largo para mas contenido - output_len = int(round(duration_s * self.sample_rate)) - - # Asegurar que output_len sea valido - output_len = max(1, output_len) - - output = np.zeros((output_len, source.shape[1]), dtype=np.float32) - output = _ensure_2d_float(output) - - # Numero dinamico de slices segun duracion (mas cortos = menos slices) - # 5 slices para <2s, hasta 9 slices para >4s - num_slices = int(5 + min(4, int(duration_s / 1.0))) - num_slices = max(5, min(9, num_slices)) - - # Generar posiciones base con curva exponencial (mas denso hacia el final) - # Esto crea un patron mas musical tipo "building up" - base_positions = [] - for i in range(num_slices): - # Curva exponencial: 0 -> 0.85 con densidad creciente - t = i / max(1, num_slices - 1) - # Funcion exponencial para agrupar mas hacia el final - pos = (t ** 1.6) * 0.85 - base_positions.append(pos) - - # Aplicar variacion aleatoria a las posiciones para sonido mas organico - # Usar hash del source_path como semilla para consistencia - seed_hash = int(hashlib.md5(source_path.encode()).hexdigest()[:8], 16) % 10000 - np.random.seed(seed_hash) - - positions = [] - for i, base_pos in enumerate(base_positions): - # Variacion de +/- 3% en posicion - variation = (np.random.random() - 
0.5) * 0.06 - pos = (base_pos + variation) * duration_s - # Asegurar que no se solapen demasiado - if i > 0: - pos = max(pos, positions[-1] + 0.08) - positions.append(min(pos, duration_s - 0.1)) - - logger.debug("STUTTER: placing %d slices at positions: %s", num_slices, [round(p, 3) for p in positions]) - - # Duracion base del slice con variacion - base_slice_duration = 0.16 - - # Crear buffer de reverb para los gaps (cola de reverb corta) - reverb_tail_samples = int(0.08 * self.sample_rate) # 80ms de reverb tail - - for index, position in enumerate(positions): - start = int(round(float(position) * self.sample_rate)) - - # Variar duracion del gate: mas corto hacia el final con variacion aleatoria - gate_variation = (np.random.random() - 0.5) * 0.04 # +/- 20ms - gate_duration = base_slice_duration - (index * 0.012) + gate_variation - # Usar constante minima para slice de stutter - min_gate_duration = self._MIN_SAMPLES_FOR_SLICE / self.sample_rate - gate_duration = max(min_gate_duration, gate_duration) - if gate_duration == min_gate_duration: - logger.debug("Using minimum slice duration %.3fs for short audio", min_gate_duration) - gate_len = max(self._MIN_SAMPLES_FOR_SLICE, min(source.shape[0], int(round(gate_duration * self.sample_rate)))) - - # Extraer slice con copia - slice_audio = np.array(source[:gate_len], dtype=np.float32, copy=True) - slice_audio = _ensure_2d_float(slice_audio) - - # VALIDACION TEMPRANA: Verificar que el slice tiene contenido real - # _ensure_2d_float retorna (1,1) con zeros si esta vacio, verificamos shape - if slice_audio.shape[0] <= 1: - logger.debug("STUTTER: slice %d has invalid shape after ensure_2d_float %s, skipping", index, slice_audio.shape) - continue - - # Pitch shift mas extremo hacia el final (hasta 1 semitono = 1.0595) - # Aplicar desde el slice 3 en adelante - if index >= 3: - # Calcular pitch factor: va de 1.02 hasta ~1.06 (1 semitono) - pitch_progress = (index - 3) / max(1, num_slices - 4) - # Factor de pitch: 1.02 hasta 
1.06 (casi 1 semitono) - pitch_factor = 1.02 + (pitch_progress * 0.04) - # Anadir pequena variacion aleatoria al pitch (+/- 10 cents) - pitch_variation = 1.0 + (np.random.random() - 0.5) * 0.012 - pitch_factor *= pitch_variation - - if scipy_signal is not None: - try: - pitched_len = max(1, int(len(slice_audio) / pitch_factor)) - pitched = np.zeros((pitched_len, slice_audio.shape[1]), dtype=np.float32) - for ch in range(slice_audio.shape[1]): - pitched[:, ch] = scipy_signal.resample(slice_audio[:, ch], pitched_len).astype(np.float32) - slice_audio = pitched - logger.debug("STUTTER: slice %d pitch shifted by factor %.3f", index, pitch_factor) - except Exception: - pass # Mantener slice original si falla - - # VALIDACION: Verificar que pitch shift no produjo array vacio - if slice_audio.size == 0: - logger.debug("STUTTER: slice %d empty after pitch shift, skipping", index) - continue - - # Aplicar ventana con fade mas corto (~0.5ms = 22 samples a 44.1kHz) - fade_samples = 22 # Reducido de 44 para transiciones mas rapidas - slice_audio = self._apply_slice_window(slice_audio, fade_samples=fade_samples) - - # VALIDACION: Verificar que window no produjo array vacio - if slice_audio.size == 0: - logger.debug("STUTTER: slice %d empty after window, skipping", index) - continue - - # Aplicar pequeño reverb al slice para espacialidad - # Wet mix bajo para no perder definicion - slice_audio = self._apply_short_reverb(slice_audio, decay=0.25, delay_ms=35.0) - - # VALIDACION: Verificar que reverb no produjo array vacio - if slice_audio.size == 0: - logger.debug("STUTTER: slice %d empty after reverb, skipping", index) - continue - - end = min(output_len, start + slice_audio.shape[0]) - if end <= start: - logger.debug("STUTTER: slice %d has invalid range (start=%d, end=%d), skipping", index, start, end) - continue - - # Ajustar slice al espacio disponible - actual_len = end - start - - # VALIDACION CRITICA: Asegurar que actual_len sea al menos 1 - if actual_len <= 0: - 
logger.debug("STUTTER: slice %d has actual_len=%d, skipping", index, actual_len) - continue - - # Trim solo si hay suficiente contenido despues del trim - if actual_len < slice_audio.shape[0]: - # Asegurar que el trim no produzca array vacio - if actual_len >= 1: - slice_audio = slice_audio[:actual_len] - else: - logger.debug("STUTTER: slice %d would become empty after trim (actual_len=%d), skipping", index, actual_len) - continue - - # VALIDACION FINAL: Verificar que slice_audio tiene contenido antes de mezclar - if slice_audio.size == 0: - logger.debug("STUTTER: slice %d is empty before mix, skipping", index) - continue - - # Ganancia variable por posicion con variacion aleatoria - # Mas alto hacia el final con pequenas variaciones - gain_base = 0.50 + (index * 0.07) - gain_variation = (np.random.random() - 0.5) * 0.08 # +/- 4% - gain = gain_base + gain_variation - gain = max(0.3, min(0.95, gain)) # Clamp entre 0.3 y 0.95 - - # Validate shapes before mixing - valid, msg = _validate_mix_shapes(output[start:end], slice_audio) - if not valid: - logger.debug("STUTTER: skipping slice %d at %d: %s", index, start, msg) - continue - - output[start:end] += slice_audio * gain - - # Agregar reverb "ghost" en el gap despues del slice (solo si no es el ultimo) - if index < len(positions) - 1: - gap_start = end - gap_end = min(output_len, gap_start + reverb_tail_samples) - if gap_end > gap_start: - # Crear ghost reverb tail muy sutil del slice anterior - ghost_len = gap_end - gap_start - - # VALIDACION: Asegurar que ghost_len es valido - if ghost_len <= 0: - logger.debug("STUTTER: slice %d has invalid ghost_len=%d, skipping ghost", index, ghost_len) - else: - ghost_audio = np.zeros((ghost_len, source.shape[1]), dtype=np.float32) - - # Copiar la cola del slice con decaimiento exponencial - # VALIDACION: Asegurar que tail_source tiene contenido - tail_samples = min(len(slice_audio), ghost_len * 2) - if tail_samples > 0: - tail_source = slice_audio[-tail_samples:] - if 
tail_source.size > 0: - decay_len = min(len(tail_source), ghost_len) - # VALIDACION: Asegurar que decay_len es valido - if decay_len > 0: - decay_curve = np.exp(-4.0 * np.linspace(0, 1, decay_len)).reshape(-1, 1).astype(np.float32) - # VALIDACION: El slicing defensivo asegura que tail_source[-decay_len:] tiene contenido - if tail_source[-decay_len:].size > 0: - ghost_audio[:decay_len] = tail_source[-decay_len:] * decay_curve * 0.15 - output[gap_start:gap_start + ghost_len] += ghost_audio - else: - logger.debug("STUTTER: slice %d tail_source slice is empty, skipping ghost", index) - else: - logger.debug("STUTTER: slice %d has invalid decay_len=%d, skipping ghost", index, decay_len) - else: - logger.debug("STUTTER: slice %d tail_source is empty, skipping ghost", index) - else: - logger.debug("STUTTER: slice %d has invalid tail_samples=%d, skipping ghost", index, tail_samples) - - # Fade global mas suave - output = self._apply_fade(output, fade_in_s=0.003, fade_out_s=0.15) - result = self._normalize(output) # Usa valor unificado por defecto - - # Fallback for empty render results - if result is None or result.size == 0: - logger.warning("STUTTER: fallback to silence (empty render result)") - result = np.zeros((int(2.5 * self.sample_rate), 2), dtype=np.float32) - - final_duration = len(result) / self.sample_rate - logger.debug("STUTTER: generated %s (duration=%.1fs, slices=%d)", Path(source_path).name, final_duration, num_slices) - return result - - - def _output_path(self, source_path: str, variant_seed: int, suffix: str) -> Path: - """Genera ruta de salida unica para un archivo procesado.""" - source = Path(source_path) - digest = hashlib.sha1(f"{source.resolve()}::{variant_seed}::{suffix}".encode("utf-8")).hexdigest()[:10] - return self.output_dir / f"{source.stem}_{suffix}_{digest}.wav" - - def _analyze_source_quality(self, audio: np.ndarray, sample_rate: int, fx_type: str) -> Dict[str, Any]: - """Analyzes source audio quality for FX derivation. 
- - Returns quality metrics for source selection decisions. - - Args: - audio: Audio array (samples, channels) - sample_rate: Sample rate in Hz - fx_type: Type of FX to derive ('reverse', 'riser', 'downlifter', 'stutter') - - Returns: - Dict with quality metrics: spectral_content, dynamic_range, suitability_score - """ - if audio is None or audio.size == 0: - return {"spectral_content": 0.0, "dynamic_range": 0.0, "suitability_score": 0.0, "recommended": False} - - audio = self._validate_audio_array(audio, context="_analyze_source_quality") - - # Filtrar por duración (máx 45s) para evitar canciones completas - duration = audio.shape[0] / sample_rate - if duration > 45.0: - logger.debug(f"Source analysis: rejecting long audio ({duration:.1f}s > 45s)") - return {"spectral_content": 0.0, "dynamic_range": 0.0, "rms": 0.0, "suitability_score": 0.0, "recommended": False} - - mono = np.mean(np.abs(audio), axis=1) if audio.ndim > 1 else np.abs(audio) - - rms = float(np.sqrt(np.mean(mono ** 2))) if mono.size > 0 else 0.0 - peak = float(np.max(mono)) if mono.size > 0 else 0.0 - dynamic_range = peak / max(rms, 1e-10) - - spectral_content = 0.5 - if scipy_signal is not None and mono.size >= 512: - try: - freqs = np.fft.rfft(mono[:min(2048, len(mono))]) - freq_magnitude = np.abs(freqs) - if freq_magnitude.size > 10: - low_energy = np.sum(freq_magnitude[:max(1, len(freq_magnitude)//8)]) - mid_energy = np.sum(freq_magnitude[max(1, len(freq_magnitude)//8):len(freq_magnitude)//2]) - high_energy = np.sum(freq_magnitude[len(freq_magnitude)//2:]) - total = low_energy + mid_energy + high_energy + 1e-10 - high_ratio = high_energy / total - mid_ratio = mid_energy / total - spectral_content = float(0.3 + 0.5 * (high_ratio + mid_ratio * 0.5)) - except Exception: - pass - - suitability_scores = { - "reverse": min(1.0, spectral_content * 0.7 + min(1.0, dynamic_range) * 0.3), - "riser": min(1.0, spectral_content * 0.5 + min(1.0, dynamic_range) * 0.4 + 0.1), - "downlifter": min(1.0, 
spectral_content * 0.5 + min(1.0, dynamic_range) * 0.4 + 0.1), - "stutter": min(1.0, 0.3 + spectral_content * 0.4 + min(1.0, dynamic_range) * 0.3), - } - - score = suitability_scores.get(fx_type, 0.5) - recommended = score >= 0.4 and dynamic_range >= 2.0 and rms >= 0.01 - - return { - "spectral_content": round(spectral_content, 3), - "dynamic_range": round(dynamic_range, 3), - "rms": round(rms, 4), - "suitability_score": round(score, 3), - "recommended": recommended, - } - - def _build_positions(self, sections: List[Dict[str, Any]], bpm: float = 128.0) -> Dict[str, List[float]]: - """Construye posiciones de FX basandose en la estructura de secciones. - - Phase 2 Improvements: - - BPM-aware timing for musical placement - - Precise reverse placement exactly at section boundaries - - Riser ends precisely before drops for maximum impact - - Downlifter placed after drops for clean section exits - - Professional stutter placement at build peaks and drop tails - - Enhanced section type detection (intro, breakdown, peak, etc.) 
- - Duplicate suppression with minimum spacing - - Quality-aware source selection - - Args: - sections: Lista de secciones con kind, name, beats - bpm: BPM del proyecto para timing musical - - Returns: - Diccionario con listas de posiciones por tipo de FX - """ - reverse_positions: List[float] = [] - riser_positions: List[float] = [] - downlifter_positions: List[float] = [] - stutter_positions: List[float] = [] - - offsets = _section_offsets(sections) - beat_duration = 60.0 / max(60.0, min(200.0, bpm)) - bar_duration = beat_duration * 4.0 - - def _add_unique(positions: List[float], value: float, min_spacing: float = 2.0) -> None: - if not any(abs(p - value) < min_spacing for p in positions): - positions.append(round(max(0.0, value), 3)) - - def _section_type(section: Dict[str, Any]) -> str: - kind = str(section.get("kind", "")).lower() - name = str(section.get("name", "")).lower() - if "intro" in kind or "intro" in name: - return "intro" - if "break" in kind or "break" in name or "breakdown" in name: - return "break" - if "build" in kind or "build" in name: - return "build" - if "drop" in kind or "drop" in name: - return "drop" - if "peak" in name or "main" in name: - return "peak" - if "outro" in kind or "outro" in name: - return "outro" - if "groove" in name: - return "groove" - return kind or "unknown" - - for index, (section, start, end) in enumerate(offsets): - section_type = _section_type(section) - name = str(section.get("name", "")).lower() - span = max(1.0, end - start) - is_peak = "peak" in name or "drop b" in name or "main" in name or "peak" in section_type - is_build = section_type == "build" - is_break = section_type == "break" - is_drop = section_type == "drop" - is_outro = section_type == "outro" - is_intro = section_type == "intro" - - reverse_bar_offset = bar_duration * 1.5 - if index > 0 and is_drop: - reverse_offset = min(8.0, max(4.0, reverse_bar_offset)) - _add_unique(reverse_positions, start - reverse_offset, min_spacing=3.0) - elif index > 0 
and is_break: - reverse_offset = min(6.0, max(3.0, reverse_bar_offset * 0.8)) - _add_unique(reverse_positions, start - reverse_offset, min_spacing=2.5) - elif index > 0 and is_build: - if index > 1: - reverse_offset = min(7.0, max(3.0, reverse_bar_offset)) - _add_unique(reverse_positions, start - reverse_offset, min_spacing=2.0) - - if is_build: - riser_duration = min(12.0, max(4.0, span * 0.7)) - beat_duration_seconds = beat_duration - riser_quantized = (riser_duration / beat_duration_seconds) * beat_duration_seconds - riser_quantized = max(4.0, min(12.0, riser_quantized)) - riser_start = max(start, end - riser_quantized) - _add_unique(riser_positions, riser_start, min_spacing=4.0) - - stutter_offset = bar_duration * 0.5 - stutter_start = max(start, end - stutter_offset - 0.5) - _add_unique(stutter_positions, stutter_start, min_spacing=1.5) - - if is_break and not is_peak: - downlifter_offset = bar_duration * 0.25 - _add_unique(downlifter_positions, start + downlifter_offset, min_spacing=3.0) - - elif is_drop and not is_peak: - down_offset = bar_duration * 0.3 - _add_unique(downlifter_positions, start + down_offset, min_spacing=3.0) - - if is_outro: - if span > bar_duration * 2: - _add_unique(downlifter_positions, start + bar_duration, min_spacing=3.0) - outro_down_position = start + span * 0.45 - _add_unique(downlifter_positions, outro_down_position, min_spacing=2.5) - - if is_peak and span > bar_duration: - stutter_offset = min(bar_duration * 1.5, span * 0.25) - _add_unique(stutter_positions, end - stutter_offset, min_spacing=1.5) - - if span > bar_duration * 3: - peak_stutter_position = start + span * 0.55 - _add_unique(stutter_positions, peak_stutter_position, min_spacing=bar_duration) - - if is_intro and span > bar_duration * 2: - intro_reverse_offset = bar_duration * 0.75 - _add_unique(reverse_positions, start + intro_reverse_offset, min_spacing=2.5) - - return { - "reverse": sorted(set(reverse_positions)), - "riser": sorted(set(riser_positions)), - 
"downlifter": sorted(set(downlifter_positions)), - "stutter": sorted(set(stutter_positions)), - } - - def build_transition_layers( - self, - reference_audio_plan: Dict[str, Any], - sections: List[Dict[str, Any]], - project_bpm: float, - variant_seed: Optional[int] = None, - ) -> List[Dict[str, Any]]: - """Construye capas de transicion desde un plan de audio de referencia. - - Args: - reference_audio_plan: Plan con matches de audio - sections: Lista de secciones del proyecto - project_bpm: BPM del proyecto - variant_seed: Semilla para variacion - - Returns: - Lista de diccionarios con info de capas generadas - """ - logger.debug("build_transition_layers called: bpm=%.1f, variant_seed=%s", project_bpm, variant_seed) - - if not isinstance(reference_audio_plan, dict): - logger.debug("reference_audio_plan is not a dict, returning empty layers") - return [] - - selected = reference_audio_plan.get("matches", {}) or {} - if not isinstance(selected, dict): - logger.debug("matches is not a dict, returning empty layers") - return [] - - # Validar project_bpm - project_bpm = max(20.0, min(300.0, float(project_bpm or 120.0))) - - variant_seed = int(variant_seed or 0) - positions = self._build_positions(sections, bpm=project_bpm) - logger.debug("Calculated FX positions: reverse=%s, riser=%s, downlifter=%s, stutter=%s", - positions["reverse"], positions["riser"], positions["downlifter"], positions["stutter"]) - layers: List[Dict[str, Any]] = [] - - FX_SOURCE_PRIORITIES = { - "reverse": [ - ("crash_fx", 0.9), - ("fill_fx", 0.85), - ("atmos_fx", 0.75), - ("synth_loop", 0.65), - ("vocal_shot", 0.55), - ], - "riser": [ - ("synth_loop", 0.9), - ("vocal_loop", 0.85), - ("atmos_fx", 0.8), - ("pad", 0.6), - ], - "downlifter": [ - ("crash_fx", 0.9), - ("atmos_fx", 0.85), - ("synth_loop", 0.7), - ("fill_fx", 0.65), - ], - "stutter": [ - ("vocal_shot", 0.95), - ("vocal_loop", 0.85), - ("snare_roll", 0.8), - ("synth_peak", 0.65), - ], - } - - FX_FALLBACK_QUERIES = { - "reverse": ["crash", 
"cymbal", "impact"], - "riser": ["riser", "buildup", "sweep"], - "downlifter": ["atmos", "drone", "texture"], - "stutter": ["vocal", "synth", "chord", "fx"], - } - - def _find_fallback_source(fx_type: str) -> str: - """Find source directly from SampleManager when selected is empty.""" - try: - import importlib.util - PACKAGE_DIR = Path(__file__).resolve().parent.parent - sample_manager_path = PACKAGE_DIR / "MCP_Server" / "sample_manager.py" - if sample_manager_path.exists(): - spec = importlib.util.spec_from_file_location("sample_manager", sample_manager_path) - sm_mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(sm_mod) - manager = sm_mod.get_manager() - else: - from .sample_manager import get_manager - manager = get_manager() - if manager is None: - return "" - queries = FX_FALLBACK_QUERIES.get(fx_type, []) - for query in queries: - samples = manager.search(query=query, limit=5) - for sample in samples: - path = str(sample.path) - if Path(path).exists(): - try: - audio, sr = self._load_audio(path) - if audio is not None and audio.shape[0] > 1000: - logger.debug("Fallback source %s found for %s FX", Path(path).name, fx_type) - return path - except Exception: - continue - except Exception as e: - logger.debug("Fallback search failed for %s: %s", fx_type, e) - return "" - - def find_best_source(fx_type: str) -> str: - """Find best source for FX type based on quality and priority.""" - priorities = FX_SOURCE_PRIORITIES.get(fx_type, []) - for key, base_score in priorities: - item = selected.get(key) - if isinstance(item, dict): - path = str(item.get("path", "") or "") - if path: - try: - audio, sr = self._load_audio(path) - quality = self._analyze_source_quality(audio, sr, fx_type) - if quality.get("recommended", False): - adjusted_score = base_score * quality.get("suitability_score", 0.5) - if adjusted_score >= 0.35: - logger.debug("Source %s selected for %s FX: quality=%.2f, score=%.2f", - Path(path).name, fx_type, 
quality.get("suitability_score", 0), adjusted_score) - return path - logger.debug("Source %s rejected for %s FX: quality=%.2f, recommended=%s", - Path(path).name, fx_type, quality.get("suitability_score", 0), quality.get("recommended")) - except Exception as e: - logger.debug("Could not analyze source %s for %s: %s", path, fx_type, e) - for key, _ in priorities: - item = selected.get(key) - if isinstance(item, dict): - path = str(item.get("path", "") or "") - if path: - return path - fallback = _find_fallback_source(fx_type) - if fallback: - logger.info("Using fallback source for %s FX: %s", fx_type, Path(fallback).name) - return fallback - - def source_path(*keys: str) -> str: - for key in keys: - item = selected.get(key) - if isinstance(item, dict): - path = str(item.get("path", "") or "") - if path: - return path - return "" - - def maybe_add(name: str, path: str, output_suffix: str, color: int, volume: float, beat_positions: List[float], renderer): - if not path or not beat_positions: - logger.debug("Skipping %s: path=%s, positions=%s", name, path if path else "(empty)", beat_positions if beat_positions else "(empty)") - return - try: - logger.debug("Generating %s from %s, duration=%.1fs, positions=%s", - name, Path(path).name, 4.0 if "REVERSE" in name else (8.0 if "RISER" in name else (6.0 if "DOWNLIFTER" in name else 2.5)), beat_positions) - rendered = renderer(path) - output_path = self._output_path(path, variant_seed, output_suffix) - file_path = self._write_audio(output_path, rendered, self.sample_rate) - logger.debug("Successfully generated %s -> %s", name, Path(file_path).name) - except Exception as exc: - logger.warning("No se pudo generar %s desde %s: %s", name, Path(path).name, exc) - logger.debug("Error details for %s: type=%s, message=%s", name, type(exc).__name__, exc) - return - layers.append({ - "name": name, - "file_path": file_path, - "positions": beat_positions, - "color": color, - "volume": volume, - "source": Path(path).name, - "generated": 
True, - }) - - reverse_source = find_best_source("reverse") - if reverse_source and positions["reverse"]: - maybe_add( - "AUDIO RESAMPLE REVERSE FX", - reverse_source, - "reverse_fx", - 26, - 0.58, - positions["reverse"], - lambda path: self._render_reverse_fx(path, duration_s=4.0, project_bpm=project_bpm), - ) - else: - fallback_reverse = source_path("crash_fx", "fill_fx", "atmos_fx", "synth_loop", "vocal_shot") - if fallback_reverse and positions["reverse"]: - maybe_add( - "AUDIO RESAMPLE REVERSE FX", - fallback_reverse, - "reverse_fx", - 26, - 0.58, - positions["reverse"], - lambda path: self._render_reverse_fx(path, duration_s=4.0, project_bpm=project_bpm), - ) - - riser_source = find_best_source("riser") - if riser_source and positions["riser"]: - maybe_add( - "AUDIO RESAMPLE RISER", - riser_source, - "riser_fx", - 27, - 0.54, - positions["riser"], - lambda path: self._render_riser(path, duration_s=8.0 if project_bpm >= 126 else 7.0, bpm=project_bpm), - ) - else: - fallback_riser = source_path("synth_loop", "vocal_loop", "atmos_fx", "pad") - if fallback_riser and positions["riser"]: - maybe_add( - "AUDIO RESAMPLE RISER", - fallback_riser, - "riser_fx", - 27, - 0.54, - positions["riser"], - lambda path: self._render_riser(path, duration_s=8.0 if project_bpm >= 126 else 7.0, bpm=project_bpm), - ) - - downlifter_source = find_best_source("downlifter") - if downlifter_source and positions["downlifter"]: - maybe_add( - "AUDIO RESAMPLE DOWNLIFTER", - downlifter_source, - "downlifter_fx", - 54, - 0.50, - positions["downlifter"], - lambda path: self._render_downlifter(path, duration_s=6.0, bpm=project_bpm), - ) - else: - fallback_downlifter = source_path("crash_fx", "atmos_fx", "synth_loop", "fill_fx") - if fallback_downlifter and positions["downlifter"]: - maybe_add( - "AUDIO RESAMPLE DOWNLIFTER", - fallback_downlifter, - "downlifter_fx", - 54, - 0.50, - positions["downlifter"], - lambda path: self._render_downlifter(path, duration_s=6.0, bpm=project_bpm), - ) - - 
stutter_source = find_best_source("stutter") - if stutter_source and positions["stutter"]: - try: - source_audio, _ = self._load_audio(stutter_source) - min_samples = 1000 - if source_audio.shape[0] < min_samples: - logger.warning("Skipping STUTTER layer: source audio too short (%d samples, min %d)", - source_audio.shape[0], min_samples) - else: - quality = self._analyze_source_quality(source_audio, self.sample_rate, "stutter") - if quality.get("suitability_score", 0) >= 0.25: - maybe_add( - "AUDIO RESAMPLE STUTTER", - stutter_source, - "stutter_fx", - 41, - 0.56, - positions["stutter"], - lambda path: self._render_stutter(path, duration_s=2.5), - ) - else: - logger.debug("STUTTER source quality too low: %.2f", quality.get("suitability_score", 0)) - except Exception as exc: - logger.warning("Skipping STUTTER layer: failed to validate source: %s", exc) - else: - fallback_stutter = source_path("vocal_shot", "vocal_loop", "snare_roll", "synth_peak") - if fallback_stutter and positions["stutter"]: - try: - source_audio, _ = self._load_audio(fallback_stutter) - min_samples = 1000 - if source_audio.shape[0] >= min_samples: - maybe_add( - "AUDIO RESAMPLE STUTTER", - fallback_stutter, - "stutter_fx", - 41, - 0.56, - positions["stutter"], - lambda path: self._render_stutter(path, duration_s=2.5), - ) - except Exception as exc: - logger.warning("Fallback STUTTER also failed: %s", exc) - - logger.info("Created %d derived layers: %s", len(layers), [layer['name'] for layer in layers]) - return layers - - def invalidate_stale_cache(self) -> int: - """Elimina entradas de cache cuyos archivos han sido modificados. - - Este metodo verifica cada entrada en el cache y elimina aquellas - donde el archivo tiene un mtime diferente al que esta en la key. - - Nota: Con el diseno actual donde mtime es parte de la key, las - entradas stale naturalmente expiran por LRU. Este metodo es - utilitario para limpieza proactiva. 
- - Returns: - Numero de entradas eliminadas - """ - removed = 0 - keys_to_remove: List[str] = [] - - for key in list(self._audio_cache.keys()): - # Extraer path de la key (formato: "path::mtime_ns" o solo "path") - if "::" in key: - path_str, _ = key.rsplit("::", 1) - else: - path_str = key - - path = Path(path_str) - - # Verificar si el archivo aun existe y tiene el mismo mtime - if not path.exists(): - # Archivo eliminado, marcar para remover - keys_to_remove.append(key) - removed += 1 - continue - - try: - current_mtime_ns = path.stat().st_mtime_ns - # Reconstruir la key esperada con el mtime actual - expected_key = self._get_cache_key(path_str, current_mtime_ns) - - # Si la key actual no coincide con la esperada, el archivo cambio - if key != expected_key: - keys_to_remove.append(key) - removed += 1 - except OSError: - # Error al acceder al archivo, marcar para remover - keys_to_remove.append(key) - removed += 1 - - # Remover las entradas stale - for key in keys_to_remove: - del self._audio_cache[key] - - if removed > 0: - logger.debug("Invalidadas %d entradas de cache stale", removed) - - return removed - - def clear_cache(self) -> int: - """Limpia el cache de audio y devuelve el numero de entradas eliminadas. - - Returns: - Numero de entradas que fueron eliminadas del cache - """ - count = len(self._audio_cache) - self._audio_cache.clear() - self._cache_sizes.clear() - self._cache_total_bytes = 0 - self._cache_hits = 0 - self._cache_misses = 0 - return count - - def cache_size(self) -> int: - """Devuelve el numero de archivos en cache. - - Returns: - Numero de entradas en cache - """ - return len(self._audio_cache) - - def cache_stats(self) -> Dict[str, Any]: - """Devuelve estadisticas del cache de audio. - - Phase 1 Improvement: Metodo nuevo para monitorear rendimiento del cache. 
- - Returns: - Diccionario con estadisticas: entries, bytes, hits, misses, hit_rate - """ - total_requests = self._cache_hits + self._cache_misses - hit_rate = self._cache_hits / total_requests if total_requests > 0 else 0.0 - - return { - "entries": len(self._audio_cache), - "max_entries": self._CACHE_LIMIT, - "bytes": self._cache_total_bytes, - "max_bytes": self._CACHE_MAX_SIZE_BYTES, - "mb": round(self._cache_total_bytes / (1024 * 1024), 2), - "hits": self._cache_hits, - "misses": self._cache_misses, - "hit_rate": round(hit_rate, 3), - "max_age_s": self._CACHE_MAX_AGE_S, - } diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/enhanced_device_automation.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/enhanced_device_automation.py deleted file mode 100644 index 213cb15..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/enhanced_device_automation.py +++ /dev/null @@ -1,431 +0,0 @@ -""" -Enhanced Device Automation for Timbral Movement Between Sections. -This module provides expanded device automation parameters for musical variation. 
-""" - -# ============================================================================= -# ENHANCED SECTION DEVICE AUTOMATION - More timbral color per section -# ============================================================================= - -# Automatizacion de devices en tracks individuales por rol - ENHANCED -SECTION_DEVICE_AUTOMATION = { - # BASS - Filtros, drive y compresion dinamica - 'bass': { - 'Saturator': { - 'Drive': {'intro': 1.5, 'build': 3.5, 'drop': 5.0, 'break': 2.0, 'outro': 1.8}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.22, 'drop': 0.30, 'break': 0.15, 'outro': 0.10}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6200.0, 'build': 8500.0, 'drop': 12000.0, 'break': 4800.0, 'outro': 5800.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.18, 'drop': 0.12, 'break': 0.22, 'outro': 0.06}, - 'Resonance': {'intro': 0.25, 'build': 0.35, 'drop': 0.20, 'break': 0.40, 'outro': 0.28}, - }, - 'Compressor': { - 'Threshold': {'intro': -12.0, 'build': -14.0, 'drop': -18.0, 'break': -10.0, 'outro': -11.0}, - 'Ratio': {'intro': 2.5, 'build': 3.0, 'drop': 4.0, 'break': 2.0, 'outro': 2.2}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0}, - }, - }, - 'sub_bass': { - 'Saturator': { - 'Drive': {'intro': 1.0, 'build': 2.5, 'drop': 4.0, 'break': 1.5, 'outro': 1.2}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 5200.0, 'build': 7200.0, 'drop': 10000.0, 'break': 4200.0, 'outro': 4800.0}, - 'Dry/Wet': {'intro': 0.05, 'build': 0.10, 'drop': 0.06, 'break': 0.14, 'outro': 0.04}, - }, - 'Utility': { - 'Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0}, - 'Gain': {'intro': 0.0, 'build': 0.2, 'drop': 0.4, 'break': -0.2, 'outro': 0.0}, - }, - }, - # PAD - Filtros envolventes con width y reverb - 'pad': { - 'Auto Filter': { - 'Frequency': {'intro': 4500.0, 'build': 8000.0, 'drop': 11000.0, 'break': 3200.0, 'outro': 4000.0}, - 'Dry/Wet': {'intro': 0.25, 'build': 0.18, 'drop': 0.12, 'break': 0.35, 
'outro': 0.28}, - 'Resonance': {'intro': 0.20, 'build': 0.28, 'drop': 0.15, 'break': 0.35, 'outro': 0.22}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.22, 'build': 0.16, 'drop': 0.10, 'break': 0.28, 'outro': 0.24}, - 'Decay Time': {'intro': 3.5, 'build': 2.8, 'drop': 2.0, 'break': 4.2, 'outro': 3.8}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.85, 'build': 1.02, 'drop': 1.12, 'break': 1.25, 'outro': 0.90}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.5, 'drop': 2.5, 'break': 0.6, 'outro': 0.7}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.15, 'drop': 0.20, 'break': 0.08, 'outro': 0.12}, - }, - }, - # ATMOS - Filtros espaciales con movement - 'atmos': { - 'Auto Filter': { - 'Frequency': {'intro': 3800.0, 'build': 7200.0, 'drop': 9800.0, 'break': 2800.0, 'outro': 3500.0}, - 'Dry/Wet': {'intro': 0.30, 'build': 0.22, 'drop': 0.15, 'break': 0.40, 'outro': 0.32}, - 'Resonance': {'intro': 0.22, 'build': 0.32, 'drop': 0.18, 'break': 0.42, 'outro': 0.25}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.35, 'build': 0.28, 'drop': 0.18, 'break': 0.42, 'outro': 0.38}, - 'Decay Time': {'intro': 4.0, 'build': 3.2, 'drop': 2.2, 'break': 5.0, 'outro': 4.5}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.70, 'build': 0.88, 'drop': 1.05, 'break': 1.20, 'outro': 0.75}, - }, - }, - # FX ELEMENTS - 'reverse_fx': { - 'Auto Filter': { - 'Frequency': {'intro': 5200.0, 'build': 9000.0, 'drop': 12000.0, 'break': 6000.0, 'outro': 4800.0}, - 'Dry/Wet': {'intro': 0.20, 'build': 0.28, 'drop': 0.15, 'break': 0.35, 'outro': 0.22}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.30, 'build': 0.35, 'drop': 0.20, 'break': 0.40, 'outro': 0.28}, - 'Decay Time': {'intro': 3.0, 'build': 4.5, 'drop': 2.5, 'break': 5.5, 'outro': 3.5}, - }, - 'Saturator': { - 'Drive': {'intro': 1.2, 'build': 2.8, 'drop': 4.5, 'break': 1.8, 'outro': 1.0}, - }, - }, - 'riser': { - 'Auto Filter': { - 'Frequency': {'intro': 4000.0, 'build': 10000.0, 'drop': 14000.0, 'break': 5500.0, 'outro': 
4200.0}, - 'Dry/Wet': {'intro': 0.15, 'build': 0.30, 'drop': 0.12, 'break': 0.22, 'outro': 0.18}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.25, 'build': 0.40, 'drop': 0.22, 'break': 0.35, 'outro': 0.20}, - 'Decay Time': {'intro': 2.5, 'build': 5.0, 'drop': 3.0, 'break': 4.0, 'outro': 2.8}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.18, 'build': 0.35, 'drop': 0.15, 'break': 0.25, 'outro': 0.15}, - 'Feedback': {'intro': 0.30, 'build': 0.55, 'drop': 0.25, 'break': 0.45, 'outro': 0.28}, - }, - 'Saturator': { - 'Drive': {'intro': 1.5, 'build': 4.0, 'drop': 3.0, 'break': 2.5, 'outro': 1.2}, - }, - }, - 'impact': { - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.15, 'build': 0.18, 'drop': 0.12, 'break': 0.20, 'outro': 0.14}, - 'Decay Time': {'intro': 2.0, 'build': 2.5, 'drop': 1.8, 'break': 3.0, 'outro': 2.2}, - }, - 'Saturator': { - 'Drive': {'intro': 1.8, 'build': 2.5, 'drop': 3.5, 'break': 2.0, 'outro': 1.5}, - }, - }, - 'drone': { - 'Auto Filter': { - 'Frequency': {'intro': 3000.0, 'build': 6500.0, 'drop': 9000.0, 'break': 2500.0, 'outro': 2800.0}, - 'Dry/Wet': {'intro': 0.20, 'build': 0.15, 'drop': 0.10, 'break': 0.30, 'outro': 0.22}, - 'Resonance': {'intro': 0.25, 'build': 0.35, 'drop': 0.22, 'break': 0.40, 'outro': 0.28}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.18, 'build': 0.14, 'drop': 0.08, 'break': 0.25, 'outro': 0.20}, - 'Decay Time': {'intro': 4.5, 'build': 3.5, 'drop': 2.5, 'break': 5.5, 'outro': 4.8}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.8, 'break': 0.6, 'outro': 0.7}, - }, - }, - # HATS - Filtros de brillantez con resonance y saturacion - 'hat_closed': { - 'Auto Filter': { - 'Frequency': {'intro': 12000.0, 'build': 14000.0, 'drop': 16000.0, 'break': 10000.0, 'outro': 11000.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.16, 'drop': 0.10, 'break': 0.20, 'outro': 0.14}, - 'Resonance': {'intro': 0.15, 'build': 0.25, 'drop': 0.12, 'outro': 0.18, 'break': 0.30}, - }, - 'Saturator': { - 'Drive': {'intro': 0.5, 
'build': 1.2, 'drop': 1.8, 'break': 0.8, 'outro': 0.6}, - }, - }, - 'hat_open': { - 'Auto Filter': { - 'Frequency': {'intro': 9000.0, 'build': 11000.0, 'drop': 13000.0, 'break': 7500.0, 'outro': 8500.0}, - 'Dry/Wet': {'intro': 0.18, 'build': 0.22, 'drop': 0.14, 'break': 0.28, 'outro': 0.20}, - 'Resonance': {'intro': 0.18, 'build': 0.28, 'drop': 0.15, 'outro': 0.20, 'break': 0.35}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.22, 'outro': 0.12}, - }, - }, - 'top_loop': { - 'Auto Filter': { - 'Frequency': {'intro': 8500.0, 'build': 10500.0, 'drop': 12500.0, 'break': 7000.0, 'outro': 8000.0}, - 'Dry/Wet': {'intro': 0.20, 'build': 0.25, 'drop': 0.16, 'break': 0.32, 'outro': 0.22}, - 'Resonance': {'intro': 0.12, 'build': 0.22, 'drop': 0.14, 'outro': 0.15, 'break': 0.28}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.05, 'build': 0.12, 'drop': 0.08, 'break': 0.18, 'outro': 0.10}, - }, - }, - # SYNTHS - 'chords': { - 'Auto Filter': { - 'Frequency': {'intro': 5500.0, 'build': 8500.0, 'drop': 11000.0, 'break': 4000.0, 'outro': 5000.0}, - 'Dry/Wet': {'intro': 0.15, 'build': 0.20, 'drop': 0.12, 'break': 0.28, 'outro': 0.18}, - 'Resonance': {'intro': 0.18, 'build': 0.28, 'drop': 0.15, 'outro': 0.20, 'break': 0.35}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.08, 'break': 0.22, 'outro': 0.12}, - 'Feedback': {'intro': 0.25, 'build': 0.40, 'drop': 0.30, 'break': 0.45, 'outro': 0.28}, - }, - 'Saturator': { - 'Drive': {'intro': 1.2, 'build': 2.2, 'drop': 3.5, 'break': 1.5, 'outro': 1.0}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.95, 'build': 1.05, 'drop': 1.15, 'break': 1.25, 'outro': 1.00}, - }, - }, - 'lead': { - 'Saturator': { - 'Drive': {'intro': 1.0, 'build': 2.5, 'drop': 4.0, 'break': 1.5, 'outro': 1.2}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.20, 'drop': 0.25, 'break': 0.10, 'outro': 0.15}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.18, 'outro': 0.10}, - 
'Feedback': {'intro': 0.20, 'build': 0.35, 'drop': 0.28, 'break': 0.40, 'outro': 0.22}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12500.0, 'break': 4500.0, 'outro': 5500.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.20, 'outro': 0.12}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.90, 'build': 1.02, 'drop': 1.10, 'break': 1.18, 'outro': 0.95}, - }, - }, - 'stab': { - 'Saturator': { - 'Drive': {'intro': 2.0, 'build': 3.5, 'drop': 5.0, 'break': 2.5, 'outro': 2.2}, - 'Dry/Wet': {'intro': 0.18, 'build': 0.25, 'drop': 0.30, 'break': 0.15, 'outro': 0.20}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 9000.0, 'drop': 12000.0, 'break': 5000.0, 'outro': 5500.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.15, 'drop': 0.08, 'break': 0.22, 'outro': 0.12}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.88, 'build': 1.00, 'drop': 1.12, 'break': 1.20, 'outro': 0.92}, - }, - }, - 'pluck': { - 'Echo': { - 'Dry/Wet': {'intro': 0.12, 'build': 0.22, 'drop': 0.14, 'break': 0.28, 'outro': 0.15}, - 'Feedback': {'intro': 0.30, 'build': 0.45, 'drop': 0.35, 'break': 0.50, 'outro': 0.32}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 7000.0, 'build': 10000.0, 'drop': 13000.0, 'break': 5500.0, 'outro': 6500.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.20, 'outro': 0.12}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.8, 'break': 1.2, 'outro': 0.9}, - }, - }, - 'arp': { - 'Echo': { - 'Dry/Wet': {'intro': 0.15, 'build': 0.28, 'drop': 0.18, 'break': 0.35, 'outro': 0.18}, - 'Feedback': {'intro': 0.35, 'build': 0.50, 'drop': 0.40, 'break': 0.58, 'outro': 0.38}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12500.0, 'break': 5000.0, 'outro': 6000.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.18, 'drop': 0.14, 'break': 0.25, 'outro': 0.15}, - }, - 'Saturator': { - 'Drive': {'intro': 0.6, 'build': 1.5, 'drop': 2.5, 
'break': 1.0, 'outro': 0.7}, - }, - }, - 'counter': { - 'Echo': { - 'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.12, 'break': 0.22, 'outro': 0.12}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 8800.0, 'drop': 11500.0, 'break': 4800.0, 'outro': 5200.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.16, 'drop': 0.12, 'break': 0.22, 'outro': 0.14}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.75, 'build': 0.92, 'drop': 1.08, 'break': 1.15, 'outro': 0.80}, - }, - }, - # VOCAL - 'vocal': { - 'Echo': { - 'Dry/Wet': {'intro': 0.12, 'build': 0.25, 'drop': 0.15, 'break': 0.30, 'outro': 0.14}, - 'Feedback': {'intro': 0.25, 'build': 0.42, 'drop': 0.30, 'break': 0.48, 'outro': 0.28}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.06, 'break': 0.18, 'outro': 0.10}, - 'Decay Time': {'intro': 2.5, 'build': 3.5, 'drop': 2.0, 'break': 4.0, 'outro': 2.8}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 9000.0, 'drop': 11000.0, 'break': 5000.0, 'outro': 5500.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.16, 'drop': 0.10, 'break': 0.20, 'outro': 0.12}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.5, 'break': 1.2, 'outro': 0.9}, - }, - }, - # DRUMS - Sin automatizacion de devices (manejados por volumen/sends) - 'kick': {}, - 'clap': {}, - 'snare_fill': {}, - 'perc': {}, - 'ride': {}, - 'tom_fill': {}, - 'crash': {}, - 'sc_trigger': {}, -} - -# ============================================================================= -# ENHANCED BUS DEVICE AUTOMATION - More drive/compression per section -# ============================================================================= - -BUS_DEVICE_AUTOMATION = { - 'drums': { - 'Compressor': { - 'Threshold': {'intro': -14.0, 'build': -16.0, 'drop': -18.5, 'break': -12.0, 'outro': -13.5}, - 'Ratio': {'intro': 2.5, 'build': 3.0, 'drop': 4.0, 'break': 2.2, 'outro': 2.4}, - 'Attack': {'intro': 0.015, 'build': 0.010, 'drop': 0.005, 'break': 0.020, 
'outro': 0.018}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.5, 'drop': 2.5, 'break': 1.0, 'outro': 0.9}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.22, 'break': 0.10, 'outro': 0.10}, - }, - 'Limiter': { - 'Gain': {'intro': 0.2, 'build': 0.3, 'drop': 0.5, 'break': 0.15, 'outro': 0.18}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 8500.0, 'build': 10000.0, 'drop': 14000.0, 'break': 6500.0, 'outro': 7500.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.10, 'drop': 0.05, 'break': 0.18, 'outro': 0.14}, - }, - }, - 'bass': { - 'Saturator': { - 'Drive': {'intro': 1.0, 'build': 2.0, 'drop': 3.5, 'break': 1.5, 'outro': 1.2}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.25, 'break': 0.12, 'outro': 0.10}, - }, - 'Compressor': { - 'Threshold': {'intro': -15.0, 'build': -17.0, 'drop': -20.0, 'break': -14.0, 'outro': -14.5}, - 'Ratio': {'intro': 3.0, 'build': 3.5, 'drop': 4.5, 'break': 2.8, 'outro': 3.0}, - 'Attack': {'intro': 0.020, 'build': 0.015, 'drop': 0.008, 'break': 0.025, 'outro': 0.022}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 5000.0, 'build': 7000.0, 'drop': 10000.0, 'break': 4500.0, 'outro': 5200.0}, - 'Dry/Wet': {'intro': 0.05, 'build': 0.08, 'drop': 0.12, 'break': 0.10, 'outro': 0.06}, - }, - }, - 'music': { - 'Compressor': { - 'Threshold': {'intro': -19.0, 'build': -20.0, 'drop': -22.0, 'break': -18.0, 'outro': -18.5}, - 'Ratio': {'intro': 2.0, 'build': 2.5, 'drop': 3.0, 'break': 1.8, 'outro': 2.0}, - 'Attack': {'intro': 0.025, 'build': 0.020, 'drop': 0.015, 'break': 0.030, 'outro': 0.028}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 8000.0, 'build': 11000.0, 'drop': 14000.0, 'break': 6000.0, 'outro': 7500.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.05, 'drop': 0.03, 'break': 0.12, 'outro': 0.10}, - }, - 'Utility': { - 'Stereo Width': {'intro': 1.05, 'build': 1.10, 'drop': 1.12, 'break': 1.18, 'outro': 1.08}, 
- }, - 'Saturator': { - 'Drive': {'intro': 0.3, 'build': 0.8, 'drop': 1.5, 'break': 0.4, 'outro': 0.35}, - 'Dry/Wet': {'intro': 0.05, 'build': 0.10, 'drop': 0.15, 'break': 0.08, 'outro': 0.06}, - }, - }, - 'vocal': { - 'Echo': { - 'Dry/Wet': {'intro': 0.06, 'build': 0.10, 'drop': 0.05, 'break': 0.15, 'outro': 0.08}, - 'Feedback': {'intro': 0.25, 'build': 0.38, 'drop': 0.28, 'break': 0.45, 'outro': 0.30}, - }, - 'Compressor': { - 'Threshold': {'intro': -16.0, 'build': -17.0, 'drop': -19.0, 'break': -15.0, 'outro': -15.5}, - 'Ratio': {'intro': 2.8, 'build': 3.2, 'drop': 3.8, 'break': 2.5, 'outro': 2.7}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.04, 'build': 0.08, 'drop': 0.03, 'break': 0.12, 'outro': 0.06}, - 'Decay Time': {'intro': 2.0, 'build': 2.8, 'drop': 1.5, 'break': 3.5, 'outro': 2.5}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 8500.0, 'build': 10500.0, 'drop': 13000.0, 'break': 7200.0, 'outro': 8000.0}, - 'Dry/Wet': {'intro': 0.06, 'build': 0.10, 'drop': 0.04, 'break': 0.14, 'outro': 0.08}, - }, - }, - 'fx': { - 'Auto Filter': { - 'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12000.0, 'break': 5500.0, 'outro': 6000.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.10, 'drop': 0.06, 'break': 0.18, 'outro': 0.14}, - 'Resonance': {'intro': 0.15, 'build': 0.22, 'drop': 0.12, 'break': 0.28, 'outro': 0.18}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.15, 'build': 0.18, 'drop': 0.10, 'break': 0.22, 'outro': 0.16}, - 'Decay Time': {'intro': 2.5, 'build': 3.2, 'drop': 2.0, 'break': 4.0, 'outro': 3.0}, - }, - 'Limiter': { - 'Gain': {'intro': -0.2, 'build': 0.0, 'drop': 0.2, 'break': -0.3, 'outro': -0.1}, - }, - 'Saturator': { - 'Drive': {'intro': 0.5, 'build': 1.2, 'drop': 2.0, 'break': 0.8, 'outro': 0.6}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.12, 'drop': 0.18, 'break': 0.10, 'outro': 0.10}, - }, - }, -} - -# ============================================================================= -# ENHANCED MASTER Device Automation - Section Energy 
Response -# ============================================================================= - -MASTER_DEVICE_AUTOMATION = { - 'Utility': { - 'Stereo Width': {'intro': 1.04, 'build': 1.08, 'drop': 1.10, 'break': 1.12, 'outro': 1.06}, - 'Gain': {'intro': 0.6, 'build': 0.8, 'drop': 1.0, 'break': 0.5, 'outro': 0.5}, - }, - 'Saturator': { - 'Drive': {'intro': 0.2, 'build': 0.35, 'drop': 0.5, 'break': 0.15, 'outro': 0.18}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.25, 'break': 0.08, 'outro': 0.12}, - }, - 'Compressor': { - 'Ratio': {'intro': 0.55, 'build': 0.62, 'drop': 0.70, 'break': 0.50, 'outro': 0.52}, - 'Threshold': {'intro': -10.0, 'build': -12.0, 'drop': -14.0, 'break': -8.0, 'outro': -9.0}, - 'Attack': {'intro': 0.020, 'build': 0.015, 'drop': 0.010, 'break': 0.025, 'outro': 0.022}, - 'Release': {'intro': 0.15, 'build': 0.12, 'drop': 0.08, 'break': 0.18, 'outro': 0.16}, - }, - 'Limiter': { - 'Gain': {'intro': 1.0, 'build': 1.2, 'drop': 1.4, 'break': 0.9, 'outro': 0.95}, - 'Ceiling': {'intro': -0.5, 'build': -0.8, 'drop': -1.0, 'break': -0.3, 'outro': -0.4}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 8000.0, 'build': 11000.0, 'drop': 15000.0, 'break': 6000.0, 'outro': 7000.0}, - 'Dry/Wet': {'intro': 0.05, 'build': 0.03, 'drop': 0.02, 'break': 0.08, 'outro': 0.06}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.02, 'build': 0.06, 'drop': 0.04, 'break': 0.08, 'outro': 0.04}, - 'Feedback': {'intro': 0.15, 'build': 0.28, 'drop': 0.20, 'break': 0.32, 'outro': 0.22}, - }, -} - -# Safety clamps for device parameters to prevent extreme values -DEVICE_PARAMETER_SAFETY_CLAMPS = { - 'Drive': {'min': 0.0, 'max': 6.0}, - 'Frequency': {'min': 20.0, 'max': 20000.0}, - 'Dry/Wet': {'min': 0.0, 'max': 1.0}, - 'Feedback': {'min': 0.0, 'max': 0.7}, - 'Stereo Width': {'min': 0.0, 'max': 1.3}, - 'Resonance': {'min': 0.0, 'max': 1.0}, - 'Ratio': {'min': 1.0, 'max': 20.0}, - 'Threshold': {'min': -60.0, 'max': 0.0}, - 'Attack': {'min': 0.0001, 'max': 0.5}, - 'Release': {'min': 
0.001, 'max': 2.0}, - 'Gain': {'min': -1.0, 'max': 1.8}, - 'Decay Time': {'min': 0.1, 'max': 10.0}, -} - -MASTER_SAFETY_CLAMPS = { - 'Stereo Width': {'min': 0.0, 'max': 1.25}, - 'Drive': {'min': 0.0, 'max': 1.5}, - 'Ratio': {'min': 0.45, 'max': 0.9}, - 'Gain': {'min': 0.0, 'max': 1.6}, - 'Attack': {'min': 0.0001, 'max': 0.1}, - 'Ceiling': {'min': -3.0, 'max': 0.0}, -} \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/fx_group_loader.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/fx_group_loader.py deleted file mode 100644 index 6c7e6ec..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/fx_group_loader.py +++ /dev/null @@ -1,170 +0,0 @@ -import json -import socket -from datetime import datetime - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\fx_group.txt" - -def log(msg): - timestamp = datetime.now().isoformat() - entry = f"[{timestamp}] {msg}" - print(entry) - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(entry + "\n") - -class AbletonSocketClient: - def __init__(self, host="127.0.0.1", port=9877, timeout=30.0): - self.host = host - self.port = port - self.timeout = timeout - - def send(self, command_type, params=None): - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - try: - line = reader.readline() - finally: - reader.close() - try: - sock.shutdown(socket.SHUT_RDWR) - except OSError: - pass - - if not line: - raise RuntimeError(f"No response for command: {command_type}") - return json.loads(line) - -def set_input_routing(client, track_index, routing_name): - result = client.send("set_track_input_routing", { - "index": track_index, - "routing_name": routing_name - }) - return result - -def main(): - log("=" * 60) - log("FX GROUP - TRANSITION FX LOADER") - log("=" * 60) - - client = 
AbletonSocketClient() - - RISER_TRACK = 20 - DOWNLIFTER_TRACK = 21 - CRASH_TRACK = 22 - IMPACT_TRACK = 23 - NOISE_TRACK = 24 - REVERSE_TRACK = 25 - - RISER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\textures\fx\BBH - Primer Impacto -Risers 1.wav" - DOWNLIFTER_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks\BBH - Primer Impacto -Downfilters 1.wav" - CRASH_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\fx\BBH - Primer Impacto - Crash 2.wav" - IMPACT_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\oneshots\fx\BBH - Primer Impacto -Impact 1.wav" - NOISE_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks\EFX_01_Em_125.wav" - REVERSE_PATH = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\organized_samples\textures\fx\BBH - Primer Impacto -Risers 4.wav" - - RISER_POSITIONS = [14, 46, 78, 110, 142, 174] - DOWNLIFTER_POSITIONS = [16, 48, 80, 112, 144, 176] - CRASH_POSITIONS = [0, 32, 64, 96, 128, 160, 192] - IMPACT_POSITIONS = [16, 48, 80, 112, 144] - NOISE_POSITIONS = [14, 46, 78, 110, 142, 174] - REVERSE_POSITIONS = [14, 30, 62, 94, 126] - - log(f"Track indices:") - log(f" RISER={RISER_TRACK}, DOWNLIFTER={DOWNLIFTER_TRACK}, CRASH={CRASH_TRACK}") - log(f" IMPACT={IMPACT_TRACK}, NOISE={NOISE_TRACK}, REVERSE={REVERSE_TRACK}") - - log("") - log("Step 1: Placing RISER samples...") - log(f" Positions: {RISER_POSITIONS}") - log(f" File: {RISER_PATH}") - result = client.send("create_arrangement_audio_pattern", { - "track_index": RISER_TRACK, - "file_path": RISER_PATH, - "positions": RISER_POSITIONS, - "name": "RISER FX" - }) - log(f" Result: {json.dumps(result, indent=2)}") - - log("") - log("Step 2: Placing DOWNLIFTER samples...") - log(f" Positions: {DOWNLIFTER_POSITIONS}") 
- log(f" File: {DOWNLIFTER_PATH}") - result = client.send("create_arrangement_audio_pattern", { - "track_index": DOWNLIFTER_TRACK, - "file_path": DOWNLIFTER_PATH, - "positions": DOWNLIFTER_POSITIONS, - "name": "DOWNLIFTER FX" - }) - log(f" Result: {json.dumps(result, indent=2)}") - - log("") - log("Step 3: Placing CRASH samples...") - log(f" Positions: {CRASH_POSITIONS}") - log(f" File: {CRASH_PATH}") - result = client.send("create_arrangement_audio_pattern", { - "track_index": CRASH_TRACK, - "file_path": CRASH_PATH, - "positions": CRASH_POSITIONS, - "name": "CRASH FX" - }) - log(f" Result: {json.dumps(result, indent=2)}") - - log("") - log("Step 4: Placing IMPACT samples...") - log(f" Positions: {IMPACT_POSITIONS}") - log(f" File: {IMPACT_PATH}") - result = client.send("create_arrangement_audio_pattern", { - "track_index": IMPACT_TRACK, - "file_path": IMPACT_PATH, - "positions": IMPACT_POSITIONS, - "name": "IMPACT FX" - }) - log(f" Result: {json.dumps(result, indent=2)}") - - log("") - log("Step 5: Placing NOISE SWEEP samples...") - log(f" Positions: {NOISE_POSITIONS}") - log(f" File: {NOISE_PATH}") - result = client.send("create_arrangement_audio_pattern", { - "track_index": NOISE_TRACK, - "file_path": NOISE_PATH, - "positions": NOISE_POSITIONS, - "name": "NOISE FX" - }) - log(f" Result: {json.dumps(result, indent=2)}") - - log("") - log("Step 6: Placing REVERSE FX samples...") - log(f" Positions: {REVERSE_POSITIONS}") - log(f" File: {REVERSE_PATH}") - result = client.send("create_arrangement_audio_pattern", { - "track_index": REVERSE_TRACK, - "file_path": REVERSE_PATH, - "positions": REVERSE_POSITIONS, - "name": "REVERSE FX" - }) - log(f" Result: {json.dumps(result, indent=2)}") - - log("") - log("=" * 60) - log("Setting input routing to 'No Input' for all FX tracks...") - log("=" * 60) - - for track_idx, track_name in [(RISER_TRACK, "RISER"), (DOWNLIFTER_TRACK, "DOWNLIFTER"), - (CRASH_TRACK, "CRASH"), (IMPACT_TRACK, "IMPACT"), - (NOISE_TRACK, "NOISE SWEEP"), 
(REVERSE_TRACK, "REVERSE FX")]: - result = set_input_routing(client, track_idx, "No Input") - log(f" {track_name} (track {track_idx}): {result}") - - log("") - log("=" * 60) - log("FX GROUP COMPLETE") - log("=" * 60) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_listener.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_listener.py deleted file mode 100644 index 2eeb6a4..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_listener.py +++ /dev/null @@ -1,4834 +0,0 @@ -""" -reference_listener.py - Reference-track audio analysis and sample matching. - -Improved for Phase 4: -- Enhanced section detection (intro, verse, build, drop, break, outro) -- Better role detection per segment -- Precise one-shot vs loop classification -- Improved clap, hat, bass loop, vocal, fx detection -- Family repetition penalty system -""" - -from __future__ import annotations - -import json -import logging -import math -import random -import warnings -import gzip -import hashlib -import time -from collections import defaultdict, deque -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple - -import numpy as np - -try: - import librosa -except ImportError: # pragma: no cover - librosa = None - -try: - import torch - import torch.nn.functional as F -except ImportError: # pragma: no cover - torch = None - F = None - -try: - import torch_directml -except ImportError: # pragma: no cover - torch_directml = None - - -logger = logging.getLogger("ReferenceListener") - -NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'] -KEY_PROFILES = { - 'major': [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88], - 'minor': [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17], -} - -_cross_generation_reference_family_memory: Dict[str, int] = defaultdict(int) -_cross_generation_reference_path_memory: 
Dict[str, int] = defaultdict(int) - -# Section type definitions with characteristic energy patterns -# Enhanced with clearer energy thresholds and additional features for robust detection -SECTION_PROFILES = { - 'intro': { - 'energy_range': (0.0, 0.35), - 'onset_density': (0.0, 0.4), - 'spectral_brightness': (0.0, 0.5), - 'energy_stability': (0.4, 1.0), - 'typical_position': (0.0, 0.15), - 'min_bars': 4, - 'max_bars': 32, - }, - 'verse': { - 'energy_range': (0.25, 0.55), - 'onset_density': (0.3, 0.6), - 'spectral_brightness': (0.3, 0.6), - 'energy_stability': (0.5, 1.0), - 'typical_position': (0.1, 0.7), - 'min_bars': 8, - 'max_bars': 32, - }, - 'build': { - 'energy_range': (0.45, 0.85), - 'onset_density': (0.5, 0.9), - 'spectral_brightness': (0.5, 0.8), - 'energy_stability': (0.0, 0.6), - 'energy_slope': (0.05, 1.0), - 'typical_position': (0.15, 0.85), - 'min_bars': 4, - 'max_bars': 24, - 'rising': True, - }, - 'drop': { - 'energy_range': (0.65, 1.0), - 'onset_density': (0.5, 1.0), - 'spectral_brightness': (0.5, 1.0), - 'energy_stability': (0.5, 1.0), - 'typical_position': (0.2, 0.9), - 'min_bars': 8, - 'max_bars': 64, - }, - 'break': { - 'energy_range': (0.1, 0.45), - 'onset_density': (0.1, 0.4), - 'spectral_brightness': (0.2, 0.5), - 'energy_stability': (0.4, 1.0), - 'typical_position': (0.3, 0.7), - 'min_bars': 4, - 'max_bars': 24, - }, - 'outro': { - 'energy_range': (0.05, 0.4), - 'onset_density': (0.05, 0.5), - 'spectral_brightness': (0.1, 0.4), - 'energy_stability': (0.0, 0.6), - 'energy_slope': (-1.0, -0.02), - 'typical_position': (0.82, 1.0), - 'min_bars': 4, - 'max_bars': 32, - 'falling': True, - }, -} - -SECTION_CONFIDENCE_THRESHOLDS = { - 'high': 0.75, - 'medium': 0.55, - 'low': 0.35, - 'ambiguous': 0.20, -} - -# Spectral signatures for role detection -SPECTRAL_ROLE_SIGNATURES = { - 'kick': {'centroid_range': (50, 400), 'rolloff_range': (200, 2000), 'rms_spread': (0.4, 1.0), 'transient_score': (0.6, 1.0)}, - 'clap': {'centroid_range': (800, 4000), 
'rolloff_range': (2000, 8000), 'rms_spread': (0.2, 0.7), 'transient_score': (0.7, 1.0)}, - 'hat': {'centroid_range': (4000, 12000), 'rolloff_range': (6000, 14000), 'rms_spread': (0.1, 0.4), 'transient_score': (0.5, 1.0)}, - 'bass_loop': {'centroid_range': (60, 500), 'rolloff_range': (200, 2000), 'rms_spread': (0.5, 1.0), 'periodicity': (0.6, 1.0)}, - 'vocal': {'centroid_range': (200, 3000), 'rolloff_range': (1000, 5000), 'rms_spread': (0.3, 0.8), 'harmonic_ratio': (0.4, 0.9)}, - 'fx': {'centroid_range': (1000, 8000), 'rolloff_range': (3000, 12000), 'rms_spread': (0.2, 0.9), 'spectral_flux': (0.5, 1.0)}, -} - -# Roles elegibles para variación por sección -# Estos roles pueden usar diferentes samples en diferentes secciones -SECTION_VARIATION_ROLES = [ - 'perc', 'perc_alt', 'top_loop', 'vocal_shot', 'synth_peak', 'atmos' -] - -# Variaciones permitidas por tipo de sección -SECTION_VARIANTS = { - 'intro': ['sparse', 'minimal'], - 'verse': ['standard', 'sparse'], - 'build': ['building', 'dense'], - 'drop': ['full', 'peak'], - 'break': ['sparse', 'atmospheric'], - 'outro': ['fading', 'minimal'] -} - -ROLE_VECTOR_TYPES = { - 'kick': set(), - 'snare': set(), - 'hat': set(), - 'bass_loop': {'bass'}, - 'perc_loop': {'drum loop', 'top'}, - 'top_loop': {'top', 'drum loop'}, - 'synth_loop': {'synth loop', 'synth'}, - 'vocal_loop': {'vocal'}, - 'crash_fx': {'fx'}, - 'fill_fx': {'fx'}, - 'snare_roll': {'fx'}, - 'atmos_fx': {'fx', 'synth'}, - 'vocal_shot': {'vocal'}, -} - -ROLE_SEGMENT_SETTINGS = { - 'kick': {'windows': {1.0, 2.0}, 'section_kinds': {'intro', 'verse', 'build', 'drop'}, 'top_k': 10}, - 'snare': {'windows': {1.0, 2.0}, 'section_kinds': {'verse', 'build', 'drop'}, 'top_k': 10}, - 'hat': {'windows': {1.0, 2.0}, 'section_kinds': {'intro', 'verse', 'build', 'drop'}, 'top_k': 12}, - 'bass_loop': {'windows': {4.0, 8.0}, 'section_kinds': {'verse', 'build', 'drop'}, 'top_k': 8}, - 'perc_loop': {'windows': {2.0, 4.0, 8.0}, 'section_kinds': {'verse', 'build', 'drop'}, 'top_k': 
8}, - 'top_loop': {'windows': {2.0, 4.0, 8.0}, 'section_kinds': {'verse', 'build', 'drop'}, 'top_k': 8}, - 'synth_loop': {'windows': {4.0, 8.0}, 'section_kinds': {'build', 'drop', 'break'}, 'top_k': 8}, - 'vocal_loop': {'windows': {2.0, 4.0, 8.0}, 'section_kinds': {'verse', 'build', 'drop', 'break'}, 'top_k': 8}, - 'crash_fx': {'windows': {1.0, 2.0, 4.0}, 'section_kinds': {'build', 'drop', 'intro', 'outro'}, 'top_k': 6}, - 'fill_fx': {'windows': {1.0, 2.0, 4.0}, 'section_kinds': {'build', 'break', 'drop'}, 'top_k': 6}, - 'snare_roll': {'windows': {1.0, 2.0, 4.0}, 'section_kinds': {'build', 'drop'}, 'top_k': 6}, - 'atmos_fx': {'windows': {4.0, 8.0}, 'section_kinds': {'intro', 'break', 'outro'}, 'top_k': 6}, - 'vocal_shot': {'windows': {1.0, 2.0, 4.0}, 'section_kinds': {'verse', 'build', 'drop'}, 'top_k': 8}, -} - -ROLE_DURATION_WINDOWS = { - 'kick': (0.05, 2.5), - 'snare': (0.05, 3.0), - 'hat': (0.05, 2.0), - 'bass_loop': (0.75, 32.0), - 'perc_loop': (0.75, 32.0), - 'top_loop': (0.75, 32.0), - 'synth_loop': (0.75, 32.0), - 'vocal_loop': (0.75, 32.0), - 'crash_fx': (0.05, 12.0), - 'fill_fx': (0.15, 12.0), - 'snare_roll': (0.15, 12.0), - 'atmos_fx': (0.25, 32.0), - 'vocal_shot': (0.05, 3.5), -} - - -def _safe_float(value: Any, default: float = 0.0) -> float: - try: - return float(np.atleast_1d(value)[0]) - except Exception: - return float(default) - - -def _normalize_chroma(chroma: np.ndarray) -> np.ndarray: - chroma = np.asarray(chroma, dtype=np.float32).reshape(12) - total = float(np.sum(chroma)) - if total <= 1e-9: - return chroma - return chroma / total - - -def _adaptive_n_fft(audio_length: int, default_n_fft: int = 2048, min_n_fft: int = 512) -> int: - """Calcula n_fft adaptativo basado en la longitud del audio.""" - max_n_fft = audio_length // 2 - adaptive = max(min_n_fft, min(default_n_fft, max_n_fft)) - if adaptive < default_n_fft: - logger.debug("Using reduced n_fft=%d for short audio (len=%d)", adaptive, audio_length) - return adaptive - - -def 
_detect_key(chroma: np.ndarray) -> Tuple[Optional[str], float]: - chroma = _normalize_chroma(chroma) - best_key = None - best_score = -999.0 - - for mode, profile in KEY_PROFILES.items(): - profile_array = np.asarray(profile, dtype=np.float32) - for index in range(12): - score = np.corrcoef(chroma, np.roll(profile_array, index))[0, 1] - if np.isnan(score): - continue - if score > best_score: - best_score = float(score) - best_key = NOTE_NAMES[index] + ('m' if mode == 'minor' else '') - - return best_key, best_score if best_key else 0.0 - - -def _key_distance(left: Optional[str], right: Optional[str]) -> int: - if not left or not right: - return 6 - - def _index(key_name: str) -> int: - base = key_name[:-1] if key_name.endswith('m') else key_name - return NOTE_NAMES.index(base) if base in NOTE_NAMES else 0 - - return min((_index(left) - _index(right)) % 12, (_index(right) - _index(left)) % 12) - - -class SectionDetector: - """Detects structural sections from audio analysis with improved segmentation.""" - - def __init__(self, hop_length: int = 512, sr: int = 22050): - self.hop_length = hop_length - self.sr = sr - self.min_section_bars = 4 - self.max_section_bars = 64 - self.min_section_seconds = 6.0 - self.max_section_seconds = 120.0 - self.energy_smoothing_window = 2.0 - self.boundary_sensitivity = 0.65 - self.min_energy_diff_for_boundary = 0.08 - self.ambiguity_threshold = 0.25 - - def _compute_segment_features(self, rms: np.ndarray, onset: np.ndarray, - centroid: np.ndarray, start_frame: int, - end_frame: int, rms_global_max: float = None) -> Dict[str, float]: - """Compute normalized features for a segment.""" - rms_seg = rms[start_frame:end_frame] - onset_seg = onset[start_frame:end_frame] - centroid_seg = centroid[start_frame:end_frame] - - if len(rms_seg) == 0: - return {'energy': 0.0, 'onset_density': 0.0, 'brightness': 0.0, 'flux': 0.0, - 'energy_stability': 1.0, 'onset_variability': 0.0} - - rms_global_max = rms_global_max if rms_global_max is not None else 
float(np.max(rms)) - rms_global_max = max(rms_global_max, 0.001) - - energy = float(np.mean(rms_seg)) - onset_density = float(np.mean(onset_seg)) / 5.0 - brightness = float(np.mean(centroid_seg)) / 10000.0 - - if len(centroid_seg) > 1: - flux = float(np.mean(np.abs(np.diff(centroid_seg)))) / 2000.0 - else: - flux = 0.0 - - energy_stability = 1.0 - if len(rms_seg) > 1: - energy_cv = float(np.std(rms_seg)) / max(float(np.mean(rms_seg)), 0.001) - energy_stability = min(1.0, max(0.0, 1.0 - energy_cv * 2.0)) - - onset_variability = 0.0 - if len(onset_seg) > 1: - onset_std = float(np.std(onset_seg)) - onset_mean = max(float(np.mean(onset_seg)), 0.001) - onset_variability = min(1.0, onset_std / onset_mean) - - return { - 'energy': min(1.0, max(0.0, (energy / rms_global_max) * 1.5)), - 'onset_density': min(1.0, max(0.0, onset_density)), - 'brightness': min(1.0, max(0.0, brightness)), - 'flux': min(1.0, max(0.0, flux)), - 'energy_stability': round(energy_stability, 3), - 'onset_variability': round(onset_variability, 3) - } - - def _compute_richer_section_features( - self, - y: np.ndarray, - sr: int, - rms: np.ndarray, - onset_env: np.ndarray, - centroid: np.ndarray, - start_time: float, - end_time: float, - hop_length: int = 512, - n_fft: int = 2048 - ) -> Dict[str, float]: - """ - Compute richer per-section features for better reference matching. - - Returns energy_mean, energy_peak, energy_slope, spectral_centroid_mean, - spectral_centroid_std, onset_rate, low_energy_ratio, high_energy_ratio. 
- """ - duration = end_time - start_time - if duration < 1.0: - return { - 'energy_mean': 0.0, - 'energy_peak': 0.0, - 'energy_slope': 0.0, - 'spectral_centroid_mean': 0.0, - 'spectral_centroid_std': 0.0, - 'onset_rate': 0.0, - 'low_energy_ratio': 0.0, - 'high_energy_ratio': 0.0, - } - - frames_per_second = sr / hop_length - start_frame = int(start_time * frames_per_second) - end_frame = int(end_time * frames_per_second) - - start_frame = max(0, min(start_frame, len(rms) - 1)) - end_frame = max(start_frame + 1, min(end_frame, len(rms))) - - section_rms = rms[start_frame:end_frame] - section_onset = onset_env[start_frame:end_frame] - section_centroid = centroid[start_frame:end_frame] - - if len(section_rms) == 0: - return { - 'energy_mean': 0.0, - 'energy_peak': 0.0, - 'energy_slope': 0.0, - 'spectral_centroid_mean': 0.0, - 'spectral_centroid_std': 0.0, - 'onset_rate': 0.0, - 'low_energy_ratio': 0.0, - 'high_energy_ratio': 0.0, - } - - # Energy metrics (normalized 0-1) - rms_max_global = float(np.max(rms)) if len(rms) > 0 else 0.01 - energy_mean = float(np.mean(section_rms)) - energy_peak = float(np.max(section_rms)) - energy_mean_norm = min(1.0, (energy_mean / max(rms_max_global, 0.001)) * 2.0) - energy_peak_norm = min(1.0, (energy_peak / max(rms_max_global, 0.001)) * 1.5) - - # Energy slope (trend within section) - if len(section_rms) > 2: - x = np.arange(len(section_rms)) - slope, _ = np.polyfit(x, section_rms, 1) - energy_slope_norm = float(np.clip(slope * 100, -1.0, 1.0)) - else: - energy_slope_norm = 0.0 - - # Spectral centroid metrics - centroid_mean = float(np.mean(section_centroid)) - centroid_std = float(np.std(section_centroid)) if len(section_centroid) > 1 else 0.0 - centroid_mean_norm = min(1.0, centroid_mean / 10000.0) - centroid_std_norm = min(1.0, centroid_std / 6000.0) - - # Onset rate (onsets per second) - onset_threshold = float(np.mean(section_onset)) + float(np.std(section_onset)) * 0.5 - onset_count = int(np.sum(section_onset > 
onset_threshold)) - onset_rate = onset_count / max(duration, 0.1) - onset_rate_norm = min(1.0, onset_rate / 20.0) - - # Low and high energy ratios (STFT-based frequency analysis) - start_sample = int(start_time * sr) - end_sample = int(end_time * sr) - start_sample = max(0, min(start_sample, len(y) - 1)) - end_sample = max(start_sample + 512, min(end_sample, len(y))) - - try: - S = np.abs(librosa.stft(y[start_sample:end_sample], n_fft=n_fft)) - freqs = librosa.fft_frequencies(sr=sr, n_fft=n_fft) - total_energy = float(np.sum(S ** 2)) + 1e-10 - - low_mask = freqs < 300 - high_mask = freqs > 4000 - - low_energy = float(np.sum(S[low_mask, :] ** 2)) - high_energy = float(np.sum(S[high_mask, :] ** 2)) - - low_energy_ratio = min(1.0, low_energy / total_energy) - high_energy_ratio = min(1.0, high_energy / total_energy) - except Exception: - low_energy_ratio = 0.0 - high_energy_ratio = 0.0 - - return { - 'energy_mean': round(energy_mean_norm, 4), - 'energy_peak': round(energy_peak_norm, 4), - 'energy_slope': round(energy_slope_norm, 4), - 'spectral_centroid_mean': round(centroid_mean_norm, 4), - 'spectral_centroid_std': round(centroid_std_norm, 4), - 'onset_rate': round(onset_rate_norm, 4), - 'low_energy_ratio': round(low_energy_ratio, 4), - 'high_energy_ratio': round(high_energy_ratio, 4), - } - - def _compute_section_kind_confidence( - self, - kind: str, - features: Dict[str, float], - position_ratio: float, - prev_features: Optional[Dict[str, float]] - ) -> Tuple[float, List[str]]: - """ - Compute confidence score for section kind classification. 
- - Returns (confidence, alternatives) where: - - confidence is 0.0-1.0 with clear semantic thresholds: - - 0.75+: high confidence (section type is clear) - - 0.55-0.75: medium confidence (likely correct but could be alternative) - - 0.35-0.55: low confidence (ambiguous, check alternatives) - - <0.35: very low confidence (section may be misclassified) - - alternatives is list of 1-2 other plausible kinds - - Enhanced with energy trend, onset variability, positional context, and feature matching. - """ - energy = features.get('energy', 0.5) - onset_density = features.get('onset_density', 0.5) - onset_var = features.get('onset_variability', 0.0) - stability = features.get('energy_stability', 1.0) - brightness = features.get('brightness', 0.5) - - energy_mean = features.get('energy_mean', energy) - onset_rate = features.get('onset_rate', onset_density) - - energy_trend = features.get('energy_trend', 0.0) - if energy_trend == 0.0 and prev_features: - prev_energy = prev_features.get('energy', energy) - energy_trend = energy - prev_energy - - profile = SECTION_PROFILES.get(kind, {}) - confidence = 0.35 - alternatives = [] - - prev_energy = prev_features.get('energy', energy) if prev_features else energy - energy_rising = energy_trend > 0.08 - energy_falling = energy_trend < -0.08 - - def _match_range(value: float, range_tuple: Tuple[float, float]) -> float: - if not range_tuple: - return 0.5 - lo, hi = range_tuple - if lo <= value <= hi: - center = (lo + hi) / 2 - spread = (hi - lo) / 2 - dist_from_center = abs(value - center) - return 1.0 - (dist_from_center / (spread * 2 + 0.01)) - elif value < lo: - return max(0.0, 1.0 - (lo - value) * 2) - else: - return max(0.0, 1.0 - (value - hi) * 2) - - energy_match = _match_range(energy_mean, profile.get('energy_range', (0.0, 1.0))) - onset_match = _match_range(onset_rate, profile.get('onset_density', (0.0, 1.0))) - brightness_match = _match_range(brightness, profile.get('spectral_brightness', (0.0, 1.0))) - stability_match = 
_match_range(stability, profile.get('energy_stability', (0.0, 1.0))) - - pos_range = profile.get('typical_position', (0.0, 1.0)) - position_match = _match_range(position_ratio, pos_range) - - base_feature_score = (energy_match * 0.35 + onset_match * 0.25 + brightness_match * 0.15 + stability_match * 0.15 + position_match * 0.10) - - if kind == 'intro': - if prev_features is None: - confidence = 0.85 + base_feature_score * 0.15 - elif position_ratio < 0.12 and energy_mean < 0.32: - confidence = 0.78 + base_feature_score * 0.18 - elif position_ratio < 0.18 and energy_mean < 0.40: - confidence = 0.62 + base_feature_score * 0.15 - elif position_ratio < 0.22 and energy_mean < 0.45: - confidence = 0.48 + base_feature_score * 0.12 - else: - confidence = 0.30 + base_feature_score * 0.10 - if energy_mean > 0.55: - confidence -= 0.18 - if energy_rising and position_ratio > 0.1: - confidence -= 0.10 - alternatives = ['verse', 'break', 'build'] - - elif kind == 'outro': - if position_ratio > 0.90: - confidence = 0.88 + base_feature_score * 0.12 - elif position_ratio > 0.85 and energy_mean < 0.35: - confidence = 0.75 + base_feature_score * 0.15 - elif position_ratio > 0.80 and energy_mean < 0.42: - confidence = 0.58 + base_feature_score * 0.12 - else: - confidence = 0.32 + base_feature_score * 0.08 - if energy_falling: - confidence += 0.12 - if energy_mean > 0.55: - confidence -= 0.12 - alternatives = ['break', 'verse', 'build'] - - elif kind == 'drop': - if energy_mean > 0.72 and onset_rate > 0.48 and stability > 0.55: - confidence = 0.92 + (energy_mean - 0.72) * 0.3 - elif energy_mean > 0.62 and onset_rate > 0.40: - confidence = 0.78 + base_feature_score * 0.15 - elif energy_mean > 0.52 and onset_rate > 0.35: - confidence = 0.55 + base_feature_score * 0.12 - else: - confidence = 0.30 + base_feature_score * 0.08 - if 0.25 < position_ratio < 0.75: - confidence += 0.05 - if position_ratio < 0.18: - confidence -= 0.15 - alternatives = ['build', 'verse'] - - elif kind == 'build': 
- slope_range = profile.get('energy_slope', (0.0, 1.0)) - slope_match = _match_range(energy_trend, slope_range) if slope_range else 0.5 - - if energy_rising and 0.40 < energy_mean < 0.72: - confidence = 0.82 + slope_match * 0.15 - if onset_var > 0.25: - confidence = min(confidence + 0.08, 0.95) - elif energy_rising and 0.35 < energy_mean < 0.78: - confidence = 0.62 + slope_match * 0.18 - elif 0.35 < energy_mean < 0.72 and not energy_falling: - confidence = 0.45 + base_feature_score * 0.15 - else: - confidence = 0.28 + base_feature_score * 0.08 - if position_ratio < 0.12 or position_ratio > 0.88: - confidence -= 0.12 - alternatives = ['drop', 'verse', 'break'] - - elif kind == 'break': - if energy_mean < 0.35 and onset_rate < 0.30 and stability > 0.50: - confidence = 0.85 + base_feature_score * 0.12 - elif energy_mean < 0.42 and onset_rate < 0.38: - confidence = 0.65 + base_feature_score * 0.10 - elif energy_mean < 0.48 and onset_rate < 0.45: - confidence = 0.42 + base_feature_score * 0.08 - else: - confidence = 0.28 + base_feature_score * 0.06 - if 0.25 < position_ratio < 0.75: - confidence += 0.06 - if brightness > 0.55: - confidence -= 0.06 - alternatives = ['intro', 'outro', 'verse'] - - elif kind == 'verse': - if 0.25 < energy_mean < 0.58 and 0.25 < onset_rate < 0.65 and stability > 0.45: - confidence = 0.72 + base_feature_score * 0.15 - elif 0.28 < energy_mean < 0.55: - confidence = 0.52 + base_feature_score * 0.12 - else: - confidence = 0.35 + base_feature_score * 0.08 - if 0.15 < position_ratio < 0.75: - confidence += 0.05 - alternatives = ['build', 'drop', 'break'] - - else: - confidence = 0.40 + base_feature_score * 0.10 - alternatives = ['verse', 'drop'] - - total_sections = features.get('total_sections', 4) - if total_sections <= 2: - confidence = min(confidence * 0.90, 0.95) - elif total_sections >= 8: - pass - - confidence = max(0.15, min(0.98, confidence)) - - return round(confidence, 3), alternatives - - def _section_character_bonus( - self, - role: 
str, - candidate_analysis: Dict[str, Any], - section_features: Dict[str, Any] - ) -> float: - """ - Compute a character bonus for matching a candidate sample to a section. - - Returns a multiplier (1.0 = no change, max ~1.25) based on how well - the candidate's features match the section's acoustic character. - """ - if not section_features: - return 1.0 - - bonus = 1.0 - - onset_rate = float(section_features.get('onset_rate', 0.5)) - low_energy_ratio = float(section_features.get('low_energy_ratio', 0.0)) - high_energy_ratio = float(section_features.get('high_energy_ratio', 0.0)) - energy_slope = float(section_features.get('energy_slope', 0.0)) - energy_mean = float(section_features.get('energy_mean', 0.5)) - - candidate_centroid = float(candidate_analysis.get('spectral_centroid', 0.0) or 0.0) - candidate_onset = float(candidate_analysis.get('onset_mean', 0.0) or 0.0) - - role_lower = role.lower() - - # High onset rate section + high onset density candidate = bonus - if onset_rate > 0.4: - candidate_onset_norm = min(1.0, candidate_onset / 5.0) - if role_lower in {'hat', 'top_loop', 'perc_loop', 'perc'}: - if candidate_onset_norm > 0.6: - bonus = max(bonus, 1.0 + (candidate_onset_norm - 0.5) * 0.25) - - # High low-energy ratio + bass role = bonus - if low_energy_ratio > 0.4: - candidate_low_centroid = max(0.0, 1.0 - candidate_centroid / 3000.0) - if role_lower in {'bass_loop', 'sub_bass', 'bass'}: - if candidate_low_centroid > 0.5: - bonus = max(bonus, 1.0 + candidate_low_centroid * 0.15) - - # High high-energy ratio + hat/top role = bonus - if high_energy_ratio > 0.3: - candidate_high_centroid = min(1.0, candidate_centroid / 10000.0) - if role_lower in {'hat', 'top_loop', 'crash_fx'}: - if candidate_high_centroid > 0.5: - bonus = max(bonus, 1.0 + candidate_high_centroid * 0.12) - - # Building section (positive slope) + snare_roll/fill_fx = bonus - if energy_slope > 0.1: - if role_lower in {'snare_roll', 'fill_fx', 'riser'}: - bonus = max(bonus, 1.0 + energy_slope * 
0.25) - - # Low energy section + atmos_fx = bonus - if energy_mean < 0.3: - if role_lower in {'atmos_fx', 'atmos', 'pad'}: - bonus = max(bonus, 1.0 + (0.3 - energy_mean) * 0.4) - - return min(1.25, max(1.0, round(bonus, 3))) - - def _get_role_section_features( - self, role: str, reference_sections: List[Dict[str, Any]], - role_segments: List[Dict[str, Any]] - ) -> Dict[str, Any]: - """Get the most relevant section features for a given role.""" - if not reference_sections: - return {} - - role_lower = role.lower() - - preferred_kinds: Dict[str, List[str]] = { - 'kick': ['drop', 'build'], - 'snare': ['drop', 'build'], - 'hat': ['drop', 'verse'], - 'bass_loop': ['drop', 'build'], - 'sub_bass': ['drop', 'build'], - 'top_loop': ['drop', 'verse'], - 'perc_loop': ['drop', 'build'], - 'synth_loop': ['drop', 'verse'], - 'vocal_loop': ['drop', 'verse'], - 'vocal_shot': ['drop', 'verse'], - 'snare_roll': ['build', 'intro'], - 'fill_fx': ['build', 'break'], - 'riser': ['build', 'intro'], - 'crash_fx': ['drop', 'intro', 'outro'], - 'atmos_fx': ['break', 'intro', 'outro'], - 'atmos': ['break', 'intro', 'outro'], - 'pad': ['break', 'intro'], - } - - kinds = preferred_kinds.get(role_lower, ['drop']) - - for section in reference_sections: - kind = str(section.get('kind', 'drop')).lower() - if kind in kinds: - return section.get('features', {}) - - if reference_sections: - for section in reference_sections: - if section.get('kind', 'drop') == 'drop': - return section.get('features', {}) - return reference_sections[0].get('features', {}) - - return {} - - def _find_boundary_peaks(self, energy_diff: np.ndarray, onset_peaks: np.ndarray, - threshold: float, min_gap_frames: int) -> List[int]: - """Find section boundary peaks combining energy changes and onset peaks with improved detection.""" - if len(energy_diff) == 0: - return [] - - threshold_val = float(threshold) - - energy_percentile = float(np.percentile(energy_diff, 75)) if len(energy_diff) > 10 else threshold_val - 
onset_percentile = float(np.percentile(onset_peaks, 55)) - - candidates = [] - for i in range(len(energy_diff)): - energy_score = float(energy_diff[i]) - onset_score = float(onset_peaks[i]) - - combined_score = energy_score * 0.6 + onset_score * 0.4 - - if energy_score > threshold_val and onset_score > onset_percentile * 0.8: - candidates.append((i, combined_score, 'both')) - elif energy_score > energy_percentile and onset_score > onset_percentile * 0.5: - candidates.append((i, combined_score * 0.7, 'energy')) - elif onset_score > float(np.percentile(onset_peaks, 85)) and energy_score > threshold_val * 0.5: - candidates.append((i, combined_score * 0.6, 'onset')) - - if not candidates: - for i in range(len(energy_diff)): - if float(energy_diff[i]) > threshold_val * 0.7: - candidates.append((i, float(energy_diff[i]), 'fallback')) - - candidates.sort(key=lambda x: x[1], reverse=True) - - boundaries = [] - for idx, score, method in candidates: - is_valid = True - for existing in boundaries: - if abs(idx - existing) < min_gap_frames: - is_valid = False - break - if is_valid: - boundaries.append(idx) - - boundaries.sort() - return boundaries - - def _validate_section_progression(self, sections: List[Dict[str, Any]], - duration: float, tempo: float) -> List[Dict[str, Any]]: - """Validate and fix section progression for musical coherence.""" - if not sections: - return [{'kind': 'drop', 'start': 0.0, 'end': duration, - 'duration': duration, 'bars': max(8, int(duration * tempo / 60 / 4)), - 'kind_confidence': 0.3, 'features': {'energy': 0.5}}] - - beats_per_second = tempo / 60.0 - seconds_per_bar = 4.0 / beats_per_second if beats_per_second > 0 else 2.0 - - result = [] - for i, section in enumerate(sections): - kind = section.get('kind', 'drop') - start = section.get('start', 0.0) - end = section.get('end', duration) - sec_duration = end - start - - estimated_bars = max(4, int(round(sec_duration / seconds_per_bar))) - if estimated_bars > self.max_section_bars: - kind = 
'drop' if section.get('features', {}).get('energy', 0.5) > 0.6 else 'break' - if estimated_bars < self.min_section_bars and i > 0: - prev_section = result[-1] if result else None - if prev_section and prev_section.get('kind') == kind: - prev_section['end'] = end - prev_section['duration'] = end - prev_section['start'] - prev_section['bars'] += estimated_bars - continue - - section['bars'] = estimated_bars - section['beats'] = estimated_bars * 4 - result.append(section) - - for i, section in enumerate(result): - section['section_index'] = i - section['total_sections'] = len(result) - - return result - - def _compute_energy_transitions(self, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Compute energy transition direction between sections.""" - if len(sections) < 2: - return sections - - for i, section in enumerate(sections): - next_section = sections[i + 1] if i < len(sections) - 1 else None - prev_section = sections[i - 1] if i > 0 else None - - current_energy = section.get('features', {}).get('energy', 0.5) - next_energy = next_section.get('features', {}).get('energy', current_energy) if next_section else current_energy - prev_energy = prev_section.get('features', {}).get('energy', current_energy) if prev_section else current_energy - - energy_diff_next = next_energy - current_energy - energy_diff_prev = current_energy - prev_energy - - if energy_diff_next > 0.15: - section['energy_transition'] = 'rising' - elif energy_diff_next < -0.15: - section['energy_transition'] = 'falling' - else: - section['energy_transition'] = 'stable' - - section['energy_delta_next'] = round(energy_diff_next, 3) - section['energy_delta_prev'] = round(energy_diff_prev, 3) - - return sections - - def detect_sections(self, rms: np.ndarray, onset: np.ndarray, - centroid: np.ndarray, duration: float, - min_section_seconds: float = 8.0) -> List[Dict[str, Any]]: - """Detect sections from audio features with improved segmentation and edge case handling.""" - if len(rms) == 0 or 
duration < min_section_seconds * 1.5: - default_bars = max(8, int(duration * 128 / 60 / 4)) if duration > 0 else 8 - return [{'kind': 'drop', 'start': 0.0, 'end': duration, 'bars': default_bars, - 'duration': duration, 'kind_confidence': 0.35, - 'confidence_level': 'low', - 'features': {'energy': 0.5, 'onset_density': 0.5}, - 'detection_method': 'fallback_short_track'}] - - hop_time = self.hop_length / self.sr - frames_per_section = max(1, int(min_section_seconds / hop_time)) - - rms_global_max = float(np.max(rms)) if len(rms) > 0 else 0.01 - kernel_size = min(len(rms), max(1, int(self.energy_smoothing_window / hop_time))) - - if kernel_size > 1: - smoothed_rms = np.convolve(rms, np.ones(kernel_size) / kernel_size, mode='same') - else: - smoothed_rms = rms - - if len(smoothed_rms) > 1: - energy_diff = np.abs(np.diff(smoothed_rms)) - if len(energy_diff) > kernel_size: - energy_diff = np.convolve(energy_diff, np.ones(kernel_size) / kernel_size, mode='same') - else: - energy_diff = np.zeros(1) - - onset_binary = (onset > np.percentile(onset, 65)).astype(float) - onset_peaks = np.convolve(onset_binary, np.ones(kernel_size) / kernel_size, mode='same') - - base_threshold = max(float(np.percentile(energy_diff, 65)), 0.001) if len(energy_diff) > 10 else 0.001 - threshold = base_threshold * self.boundary_sensitivity - - primary_boundaries = self._find_boundary_peaks(energy_diff, onset_peaks, float(threshold), frames_per_section) - - secondary_threshold = float(threshold) * 0.55 - secondary_boundaries = self._find_boundary_peaks(energy_diff, onset_peaks, secondary_threshold, frames_per_section // 2) - - all_boundaries = sorted(set([0] + primary_boundaries + secondary_boundaries + [len(rms) - 1])) - consolidated_boundaries = [all_boundaries[0]] - for boundary in all_boundaries[1:]: - min_gap = frames_per_section * 0.4 - if boundary - consolidated_boundaries[-1] >= min_gap: - consolidated_boundaries.append(boundary) - - if len(consolidated_boundaries) < 3 and duration > 
min_section_seconds * 2: - _ = smoothed_rms - n_segments = max(3, min(6, int(duration / min_section_seconds))) - segment_boundaries = [0] - for i in range(1, n_segments): - target_frame = int(i * len(rms) / n_segments) - search_range = max(1, int(len(rms) / (n_segments * 2))) - best_frame = target_frame - best_diff = float('inf') - for j in range(max(0, target_frame - search_range), min(len(energy_diff), target_frame + search_range)): - if float(energy_diff[j]) > best_diff * 0.8: - best_diff = float(energy_diff[j]) - best_frame = j - segment_boundaries.append(best_frame) - segment_boundaries.append(len(rms) - 1) - consolidated_boundaries = sorted(set(consolidated_boundaries + segment_boundaries)) - - sections = [] - prev_features = None - prev_energy_trend = None - - for i in range(len(consolidated_boundaries) - 1): - start_frame = consolidated_boundaries[i] - end_frame = consolidated_boundaries[i + 1] - - if end_frame <= start_frame: - continue - - start_time = start_frame * hop_time - end_time = end_frame * hop_time - segment_duration = end_time - start_time - - min_duration = min_section_seconds * 0.2 - if segment_duration < min_duration: - if sections: - sections[-1]['end'] = end_time - sections[-1]['duration'] = end_time - sections[-1]['start'] - sections[-1]['merged_short'] = True - continue - - max_duration = self.max_section_seconds - if segment_duration > max_duration: - mid_frame = (start_frame + end_frame) // 2 - consolidated_boundaries.insert(i + 1, mid_frame) - end_frame = mid_frame - end_time = end_frame * hop_time - segment_duration = end_time - start_time - - features = self._compute_segment_features( - rms, onset, centroid, start_frame, end_frame, rms_global_max - ) - - energy = features.get('energy', 0.5) - if prev_features: - energy_trend = energy - prev_features.get('energy', 0.5) - else: - energy_trend = 0.0 - features['energy_trend'] = round(energy_trend, 3) - - position_ratio = start_time / duration if duration > 0 else 0.0 - 
positional_weight = self._compute_positional_weight(position_ratio, len(consolidated_boundaries) - 1, i) - - kind = self._classify_segment_v2( - features, position_ratio, prev_features, energy_trend, prev_energy_trend - ) - - estimated_bars = max(4, int(round(segment_duration * 128 / 60 / 4))) - - sections.append({ - 'kind': kind, - 'start': round(start_time, 3), - 'end': round(end_time, 3), - 'duration': round(segment_duration, 3), - 'bars': estimated_bars, - 'features': features, - 'positional_weight': positional_weight, - }) - - prev_features = features - prev_energy_trend = energy_trend - - merged = [] - for section in sections: - if merged and merged[-1]['kind'] == section['kind'] and section['duration'] < min_section_seconds * 0.6: - merged[-1]['end'] = section['end'] - merged[-1]['duration'] = round(section['end'] - merged[-1]['start'], 3) - merged[-1]['bars'] += section.get('bars', 4) - merged_features = merged[-1].get('features', {}) - new_features = section.get('features', {}) - merged_features['energy'] = (merged_features.get('energy', 0.5) + new_features.get('energy', 0.5)) / 2 - merged[-1]['merged_with_next'] = True - else: - merged.append(section) - - merged = self._validate_section_progression(merged, duration, 128.0) - merged = self._compute_energy_transitions(merged) - - merged = self._add_confidence_levels(merged) - - if len(merged) < 2 and duration > min_section_seconds * 2: - merged = self._create_fallback_sections(duration, 128.0, rms, onset) - - return merged - - def _add_confidence_levels(self, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Add human-readable confidence levels to sections.""" - for section in sections: - confidence = section.get('kind_confidence', 0.5) - if confidence >= SECTION_CONFIDENCE_THRESHOLDS['high']: - section['confidence_level'] = 'high' - elif confidence >= SECTION_CONFIDENCE_THRESHOLDS['medium']: - section['confidence_level'] = 'medium' - elif confidence >= SECTION_CONFIDENCE_THRESHOLDS['low']: - 
section['confidence_level'] = 'low' - else: - section['confidence_level'] = 'ambiguous' - return sections - - def _create_fallback_sections(self, duration: float, tempo: float, - rms: np.ndarray, onset: np.ndarray) -> List[Dict[str, Any]]: - """Create fallback sections when detection fails.""" - sections = [] - beats_per_second = tempo / 60.0 - seconds_per_bar = 4.0 / beats_per_second if beats_per_second > 0 else 2.0 - - total_bars = max(16, int(duration / seconds_per_bar)) - - if duration < 60: - sections = [ - {'kind': 'intro', 'start': 0.0, 'end': duration * 0.25, - 'duration': duration * 0.25, 'bars': max(4, int(total_bars * 0.25)), - 'kind_confidence': 0.35, 'confidence_level': 'low', - 'features': {'energy': 0.3}, 'detection_method': 'fallback'}, - {'kind': 'drop', 'start': duration * 0.25, 'end': duration * 0.75, - 'duration': duration * 0.5, 'bars': max(8, int(total_bars * 0.5)), - 'kind_confidence': 0.35, 'confidence_level': 'low', - 'features': {'energy': 0.6}, 'detection_method': 'fallback'}, - {'kind': 'outro', 'start': duration * 0.75, 'end': duration, - 'duration': duration * 0.25, 'bars': max(4, int(total_bars * 0.25)), - 'kind_confidence': 0.35, 'confidence_level': 'low', - 'features': {'energy': 0.35}, 'detection_method': 'fallback'}, - ] - else: - n_sections = min(5, max(3, int(duration / 30))) - section_duration = duration / n_sections - - energy_profile = [] - if len(rms) > n_sections: - segment_size = len(rms) // n_sections - for i in range(n_sections): - segment_rms = rms[i * segment_size:(i + 1) * segment_size] - energy_profile.append(float(np.mean(segment_rms)) if len(segment_rms) > 0 else 0.5) - max_energy = max(energy_profile) if energy_profile else 0.5 - energy_profile = [e / max_energy for e in energy_profile] - else: - energy_profile = [0.3, 0.5, 0.7, 0.6, 0.4][:n_sections] - - kinds = ['intro', 'verse', 'build', 'drop', 'outro'] - for i in range(n_sections): - kind = kinds[i] if i < len(kinds) else 'verse' - if i == n_sections - 1: - 
kind = 'outro' - elif i == 0: - kind = 'intro' - elif i == n_sections - 2: - kind = 'drop' - elif energy_profile[i] > 0.6 and i > 0 and i < n_sections - 1: - kind = 'drop' - - start = i * section_duration - end = (i + 1) * section_duration if i < n_sections - 1 else duration - - sections.append({ - 'kind': kind, - 'start': round(start, 3), - 'end': round(end, 3), - 'duration': round(end - start, 3), - 'bars': max(4, int((end - start) / seconds_per_bar)), - 'kind_confidence': 0.30, - 'confidence_level': 'low', - 'features': {'energy': energy_profile[i] if i < len(energy_profile) else 0.5}, - 'detection_method': 'fallback_energy_profile', - }) - - return sections - - def _compute_positional_weight(self, position_ratio: float, total_sections: int, - section_index: int) -> float: - """Compute positional weight for section classification confidence.""" - if total_sections <= 1: - return 1.0 - - if position_ratio < 0.15: - return 1.2 - elif position_ratio > 0.85: - return 1.2 - elif 0.35 < position_ratio < 0.65: - return 0.9 - else: - return 1.0 - - def _classify_segment_v2(self, features: Dict[str, float], position_ratio: float, - prev_features: Optional[Dict[str, float]], - energy_trend: float, prev_energy_trend: Optional[float]) -> str: - """Classify segment with improved energy trend and context awareness.""" - energy = features.get('energy', 0.5) - onset = features.get('onset_density', 0.5) - brightness = features.get('brightness', 0.5) - stability = features.get('energy_stability', 1.0) - onset_var = features.get('onset_variability', 0.0) - - is_rising = energy_trend > 0.08 or (prev_energy_trend is not None and prev_energy_trend > 0.05 and energy_trend >= 0) - is_falling = energy_trend < -0.08 or (prev_energy_trend is not None and prev_energy_trend < -0.05) - - is_strong_rise = energy_trend > 0.15 - _ = energy_trend < -0.15 - - scores = {} - - if position_ratio < 0.18: - intro_energy_match = max(0, 0.5 - abs(energy - 0.22)) - intro_onset_match = max(0, 0.4 - 
abs(onset - 0.22)) - intro_pos_bonus = 0.65 * (0.18 - position_ratio) - intro_stability_bonus = 0.15 if stability > 0.5 else 0 - scores['intro'] = intro_energy_match + intro_onset_match + intro_pos_bonus + intro_stability_bonus - else: - scores['intro'] = -0.5 - - if position_ratio > 0.80: - outro_energy_match = max(0, 0.5 - abs(energy - 0.22)) - outro_onset_match = max(0, 0.4 - abs(onset - 0.22)) - outro_pos_bonus = 0.55 * (position_ratio - 0.80) - outro_falling_bonus = 0.25 if is_falling else (0.10 if not is_rising else -0.15) - scores['outro'] = outro_energy_match + outro_onset_match + outro_pos_bonus + outro_falling_bonus - else: - scores['outro'] = -0.2 - - if is_strong_rise and 0.38 < energy < 0.75: - scores['build'] = 0.85 + (abs(energy_trend) * 1.5) + (onset * 0.25) - elif is_rising and 0.35 < energy < 0.78: - scores['build'] = 0.55 + (abs(energy_trend) * 2.0) + (onset * 0.15) - elif 0.35 < energy < 0.72 and onset > 0.45 and position_ratio < 0.75: - scores['build'] = 0.38 + (onset * 0.25) - elif 0.38 < energy < 0.65 and onset_var > 0.2: - scores['build'] = 0.32 + (onset_var * 0.3) - else: - scores['build'] = max(0, 0.15 - abs(energy_trend) * 2) if energy_trend < 0.05 else 0.08 - - if energy > 0.68 and onset > 0.48 and stability > 0.55: - brightness_bonus = 0.12 if brightness > 0.5 else 0 - scores['drop'] = (energy - 0.50) * 1.4 + (onset - 0.40) * 0.7 + brightness_bonus - elif energy > 0.60 and onset > 0.42: - scores['drop'] = (energy - 0.50) * 1.1 + onset * 0.45 - elif energy > 0.52: - scores['drop'] = 0.35 + (energy - 0.52) * 1.5 - else: - scores['drop'] = max(-0.3, (energy - 0.45) * 2) - - if energy < 0.40 and onset < 0.32 and stability > 0.45: - scores['break'] = 0.75 + (0.40 - energy) * 0.55 + (0.32 - onset) * 0.45 - elif energy < 0.48 and onset < 0.38 and not is_rising: - scores['break'] = 0.45 + (0.48 - energy) * 0.35 + (0.38 - onset) * 0.25 - elif energy < 0.45 and brightness < 0.45: - scores['break'] = 0.35 + (0.45 - energy) * 0.3 - else: - 
scores['break'] = max(0, 0.08 - abs(energy - 0.35) - abs(onset - 0.32)) - - if 0.22 < energy < 0.60 and 0.22 < onset < 0.68 and stability > 0.40: - scores['verse'] = 0.55 - abs(energy - 0.42) * 1.5 - abs(onset - 0.42) * 1.2 - elif 0.28 < energy < 0.52 and not is_rising and not is_falling: - scores['verse'] = 0.38 - abs(energy - 0.40) * 1.0 - elif 0.25 < energy < 0.55: - scores['verse'] = 0.25 - else: - scores['verse'] = 0.12 - - if not scores: - return 'drop' - - best_kind, best_score = max(scores.items(), key=lambda x: x[1]) - - if best_score < 0.10: - if energy > 0.52: - return 'drop' - elif position_ratio < 0.18: - return 'intro' - elif position_ratio > 0.82: - return 'outro' - elif energy < 0.42: - return 'break' - elif is_rising: - return 'build' - else: - return 'verse' - - second_best = sorted(scores.items(), key=lambda x: x[1], reverse=True) - if len(second_best) > 1: - score_gap = second_best[0][1] - second_best[1][1] - if score_gap < 0.12: - if second_best[0][0] == 'drop' and second_best[1][0] == 'build': - if is_rising: - return 'build' - - return best_kind - - -def generate_segment_rag_summary(report: Dict[str, Any], - library_dir: Path) -> Dict[str, Any]: - """ - Genera resumen enriquecido del indexado. 
- - Incluye: - - Estadisticas basicas del report - - Coverage por rol - - Segmentos por archivo (avg, min, max) - - Tiempo de procesamiento estimado - - Salud del cache - """ - manifest = report.get('manifest', []) - - # Calcular estadisticas - segment_counts = [m.get('segments', 0) for m in manifest] - - # Coverage por rol - role_segments: Dict[str, int] = defaultdict(int) - for m in manifest: - for role in m.get('roles', []): - role_segments[role] += m.get('segments', 0) - - # Cache size - cache_dir = library_dir / ".segment_rag" - cache_size_bytes = sum(f.stat().st_size for f in cache_dir.glob("*.json.gz")) if cache_dir.exists() else 0 - - return { - **report, # Incluir todos los campos originales - - # Estadisticas agregadas - "summary_stats": { - "avg_segments_per_file": sum(segment_counts) / len(segment_counts) if segment_counts else 0, - "min_segments": min(segment_counts) if segment_counts else 0, - "max_segments": max(segment_counts) if segment_counts else 0, - "total_files_indexed": len(manifest), - }, - - # Coverage por rol - "role_coverage": dict(role_segments), - - # Cache info - "cache_info": { - "cache_dir": str(cache_dir), - "cache_size_bytes": cache_size_bytes, - "cache_size_mb": round(cache_size_bytes / (1024 * 1024), 2), - }, - - # Timestamp - "generated_at": time.time(), - "generated_at_iso": time.strftime('%Y-%m-%dT%H:%M:%S'), - } - - -class ReferenceAudioListener: - # Improved role patterns with more comprehensive matching - ROLE_PATTERNS = { - 'kick': ['**/*Kick*.wav', '**/*kick*.wav', '**/*KICK*.wav', '**/*Kick_*.wav', '**/*_Kick*.wav', '**/*BD*.wav', '**/*bd*.wav', '**/*bd_*.wav'], - 'snare': ['**/*Clap*Hit*.wav', '**/*Snare*.wav', '**/*snare*.wav', '**/*Clap*.wav', '**/*clap*.wav', - '**/*SNARE*.wav', '**/*CLAP*.wav', '**/*Clap_*.wav', '**/*Snare_*.wav', '**/*SD*.wav', '**/*sd*.wav'], - 'hat': ['**/*Closed Hat*.wav', '**/*Hat*.wav', '**/*hat*.wav', '**/*HAT*.wav', '**/*ClosedHat*.wav', - '**/*Open Hat*.wav', '**/*OpenHat*.wav', 
'**/*cym*.wav', '**/*hihat*.wav', '**/*HiHat*.wav', '**/*HH*.wav', '**/*hh_*.wav'], - 'bass_loop': ['**/*Bass Loop*.wav', '**/*Bass_Loop*.wav', '**/*bass_loop*.wav', '**/*BassLoop*.wav', - '**/*BASS LOOP*.wav', '**/*Sub*Bass*.wav', '**/*Reese*.wav', '**/*808*.wav', '**/bass/*.wav'], - 'perc_loop': ['**/*Percussion Loop*.wav', '**/*Perc_Loop*.wav', '**/*perc_loop*.wav', - '**/*PercLoop*.wav', '**/*Perc*.wav', '**/*perc*.wav', '**/*Conga*.wav', '**/perc/*.wav'], - 'top_loop': ['**/*Top Loops*.wav', '**/*Top Loop*.wav', '**/*Full Drum*.wav', '**/*top_loop*.wav', - '**/*TopLoop*.wav', '**/*Drum Loop*.wav', '**/*DrumLoop*.wav', '**/*FullDrum*.wav', '**/hat/*.wav'], - 'synth_loop': ['**/*Synth Loop*.wav', '**/*Synth_Loop*.wav', '**/*synth_loop*.wav', - '**/*SynthLoop*.wav', '**/*Synth*.wav', '**/*synth*.wav', '**/*Chord*.wav', '**/*Pad*.wav', '**/synth/*.wav'], - 'vocal_loop': ['**/*Vocal Loop*.wav', '**/*Vox*.wav', '**/*vocal_loop*.wav', '**/*VocalLoop*.wav', - '**/*Vocal*.wav', '**/*vocal*.wav', '**/*VOCAL*.wav', '**/*VoxLoop*.wav', '**/*Chopped*.wav', '**/vocal/*.wav'], - 'crash_fx': ['**/*Crash*.wav', '**/*crash*.wav', '**/*CRASH*.wav', '**/*Impact*.wav', '**/*impact*.wav', - '**/*Cymbal*.wav', '**/*cymbal*.wav', '**/fx/*.wav'], - 'fill_fx': ['**/*Fill*.wav', '**/*fill*.wav', '**/*Tom Loop*.wav', '**/*Tom*.wav', '**/*tom*.wav', - '**/*Transition*.wav', '**/*FX*.wav'], - 'snare_roll': ['**/*Snareroll*.wav', '**/*Snare Roll*.wav', '**/*snare_roll*.wav', '**/*SnareRoll*.wav', - '**/*Roll*.wav', '**/*roll*.wav', '**/*Buildup*.wav'], - 'atmos_fx': ['**/*Atmos*.wav', '**/*atmos*.wav', '**/*Drone*.wav', '**/*drone*.wav', '**/*Ambient*.wav', - '**/*Noise*.wav', '**/*noise*.wav', '**/*Texture*.wav', '**/*Pad*.wav', '**/textures/*.wav'], - 'vocal_shot': ['**/*Vocal One Shot*.wav', '**/*Vocal Importante*.wav', '**/*vocal_shot*.wav', - '**/*VocalShot*.wav', '**/*OneShot*.wav', '**/*Shot*.wav', '**/*vocal chop*.wav'], - } - - # Role bus assignments - ROLE_TO_BUS = { - 'kick': 
'drums', 'snare': 'drums', 'hat': 'drums', - 'bass_loop': 'bass', - 'perc_loop': 'drums', 'top_loop': 'drums', - 'synth_loop': 'music', - 'vocal_loop': 'vocal', 'vocal_shot': 'vocal', - 'crash_fx': 'fx', 'fill_fx': 'fx', 'snare_roll': 'fx', 'atmos_fx': 'fx', - } - - # Patrones de exclusion fuerte por rol - estos NUNCA deben pasar - ROLE_EXCLUSION_PATTERNS = { - 'kick': [ - 'full drum', 'full_mix', 'fullmix', 'fulldrum', 'full mix','demo', 'song', 'master', 'top loop', 'drum loop', - 'snare roll', 'fill', 'hat loop', 'vocal loop', 'complete kit','full kit', 'mixed', 'stems', 'bounce', 'preview' - ], - 'snare': [ - 'full drum', 'full_mix', 'fullmix', 'full mix', 'demo', 'song','master', 'snare roll', 'snare_roll', 'hat loop', 'kick loop', - 'top loop', 'drum loop', 'bass loop', 'complete kit', 'full kit','mixed', 'stems', 'bounce', 'preview' - ], - 'hat': [ - 'full drum', 'full_mix', 'fullmix', 'full mix', 'demo', 'song','master', 'kick loop', 'snare loop', 'bass loop', 'vocal loop', - 'complete', 'full kit', 'mixed', 'stems', 'bounce', 'preview' - ], - 'bass_loop': [ - 'full drum', 'full_mix', 'fullmix', 'full mix', 'demo', 'song','master', 'top loop', 'vocal loop', 'vocal_loop', 'drum loop', - 'hat loop', 'snare loop', 'perc loop', 'fx loop', 'atmos','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'vocal_loop': [ - 'full drum', 'full_mix', 'fullmix', 'full mix', 'demo', 'song','master', 'one shot', 'oneshot', 'hit', 'stab', 'drum loop', - 'bass loop', 'top loop', 'hat loop', 'kick', 'snare','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'top_loop': [ - 'bass loop', 'bass_loop', 'vocal loop', 'vocal_loop','demo', 'song', 'master','synth loop', 'pad', 'atmos', 'riser', 'downlifter','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'fill_fx': [ - 'kick', 'snare', 'hat', 'clap', 'bass', 'vocal','full mix', 'demo', 'song', 'master', 'loop', 'groove','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'snare_roll': [ - 'kick', 'hat', 
'clap', 'bass', 'vocal','full mix', 'demo', 'song', 'master', 'atmos', 'pad','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'atmos_fx': [ - 'kick', 'snare', 'hat', 'clap', 'bass','full mix', 'demo', 'song', 'master', 'drum loop','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'synth_loop': [ - 'full drum', 'full_mix', 'fullmix', 'full mix', 'demo', 'song','master', 'drum loop', 'vocal loop', 'bass loop','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'crash_fx': [ - 'full mix', 'demo', 'song', 'master', 'loop','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - 'vocal_shot': [ - 'full mix', 'demo', 'song', 'master', 'loop','complete', 'mixed', 'stems', 'bounce', 'preview' - ], - } - - def __init__(self, library_dir: str, cache_path: Optional[str] = None): - self.library_dir = Path(library_dir) - self.cache_path = Path(cache_path) if cache_path else self.library_dir / ".reference_audio_cache.json" - self.segment_index_dir = self.library_dir / ".segment_rag" - self.segment_index_dir.mkdir(parents=True, exist_ok=True) - self._cache: Dict[str, Any] = self._load_cache() - self.device, self.device_name = self._resolve_device() - self._recent_paths = deque(maxlen=64) # Increased from 48 - self._recent_families = deque(maxlen=32) # Increased from 24 - self._family_usage_count: Dict[str, int] = {} # Track family usage for progressive penalty - self._section_detector = SectionDetector() # New section detector - self.sample_index_path = self.library_dir / ".sample_index.json" - self.vector_store_dir = self.library_dir.parent / "vector_store" - self._sample_index_by_path = self._load_sample_index_metadata() - self._vector_store_meta_by_path, self._vector_store_meta_by_name = self._load_vector_store_metadata() - # DJ-06: Reference directory for auto-discovery - self.reference_dir = self.library_dir.parent / "reference" - self.reference_dir.mkdir(parents=True, exist_ok=True) - - def discover_reference_track(self) -> Optional[Dict[str, Any]]: - 
"""DJ-06: Auto-discover reference tracks from librerias/reference/ directory. - Returns analysis results (BPM, key, energy) from the first found reference file. - """ - audio_extensions = {'.mp3', '.wav', '.aif', '.aiff', '.flac'} - ref_files = [] - try: - for f in self.reference_dir.iterdir(): - if f.is_file() and f.suffix.lower() in audio_extensions: - ref_files.append(f) - except Exception: - pass - - if not ref_files: - return None - - # Analyze the first (or most recent) reference file - ref_file = max(ref_files, key=lambda f: f.stat().st_mtime) - try: - analysis = self.analyze_file(str(ref_file)) - return { - "reference_file": str(ref_file), - "reference_name": ref_file.stem, - "bpm": analysis.get("tempo", 0), - "key": analysis.get("key", ""), - "key_score": analysis.get("key_score", 0), - "energy": analysis.get("rms_mean", 0), - "sections": [ - {"name": s.get("label", ""), "start": s.get("start", 0), "end": s.get("end", 0)} - for s in analysis.get("sections", []) - ], - "duration": analysis.get("duration", 0), - } - except Exception as e: - return {"error": str(e), "reference_file": str(ref_file)} - - def suggest_from_reference(self) -> Dict[str, Any]: - """DJ-06: Get generation suggestions from auto-discovered reference track. - Returns recommended BPM, key, and structure parameters. 
- """ - ref = self.discover_reference_track() - if ref is None: - return {"available": False, "note": "Place a reference track in librerias/reference/"} - - if "error" in ref: - return {"available": False, "error": ref["error"]} - - return { - "available": True, - "reference_name": ref.get("reference_name", ""), - "recommended_bpm": ref.get("bpm", 126), - "recommended_key": ref.get("key", "Am"), - "reference_duration": ref.get("duration", 0), - "section_count": len(ref.get("sections", [])), - "note": "Use these values with generate_track() for reference-informed generation" - } - - @staticmethod - def _name_contains_any(name: str, tokens: Tuple[str, ...]) -> bool: - return any(token in name for token in tokens) - - @staticmethod - def _name_contains_none(name: str, tokens: Tuple[str, ...]) -> bool: - return not any(token in name for token in tokens) - - def _resolve_device(self): - if torch is not None and torch_directml is not None: - try: - return torch_directml.device(), "directml" - except Exception: - pass - if torch is not None: - return torch.device("cpu"), "cpu" - return None, "numpy" - - def _load_cache(self) -> Dict[str, Any]: - if not self.cache_path.exists(): - return {} - try: - return json.loads(self.cache_path.read_text(encoding="utf-8")) - except Exception: - return {} - - def _save_cache(self) -> None: - try: - self.cache_path.write_text(json.dumps(self._cache, indent=2), encoding="utf-8") - except Exception: - pass - - def _cache_key(self, path: Path) -> str: - return str(path.resolve()).lower() - - def _fingerprint(self, path: Path) -> str: - stat = path.stat() - return f"{stat.st_size}:{stat.st_mtime_ns}" - - def _analysis_cache_key(self, path: Path, duration_limit: Optional[float] = None) -> str: - suffix = "full" if duration_limit is None else f"{float(duration_limit):.3f}" - return f"{self._cache_key(path)}|{suffix}" - - def _segment_index_cache_prefix(self, path: Path, windows: set) -> str: - path_key = 
hashlib.sha1(self._cache_key(path).encode("utf-8")).hexdigest()[:16] - fingerprint = hashlib.sha1(self._fingerprint(path).encode("utf-8")).hexdigest()[:12] - windows_key = "-".join(f"{float(item):.2f}" for item in sorted(float(value) for value in windows)) or "full" - return f"{path_key}__{fingerprint}__{windows_key}" - - def _segment_index_cache_path(self, path: Path, windows: set, duration_limit: float) -> Path: - prefix = self._segment_index_cache_prefix(path, windows) - duration_key = f"{float(duration_limit):.2f}" - return self.segment_index_dir / f"{prefix}__{duration_key}.json.gz" - - def _get_segment_rag_state_path(self) -> Path: - """Get the path to the segment RAG indexing state file.""" - return self.segment_index_dir / "indexing_state.json" - - def _save_segment_rag_state(self, state: Dict[str, Any]) -> None: - """Save segment RAG indexing state to disk.""" - state_path = self._get_segment_rag_state_path() - state_path.parent.mkdir(parents=True, exist_ok=True) - with open(state_path, "w", encoding="utf-8") as f: - json.dump(state, f, indent=2) - - def _load_segment_rag_state(self) -> Dict[str, Any]: - """Load segment RAG indexing state from disk.""" - state_path = self._get_segment_rag_state_path() - if not state_path.exists(): - return {} - try: - with open(state_path, "r", encoding="utf-8") as f: - return json.load(f) - except Exception: - logger.warning("Failed to load segment RAG state, starting fresh", exc_info=True) - return {} - - def _load_segment_bank_from_disk(self, path: Path, windows: set, duration_limit: float) -> List[Dict[str, Any]]: - cache_path = self._segment_index_cache_path(path, windows, duration_limit) - candidate_paths = [cache_path] - if not cache_path.exists(): - prefix = self._segment_index_cache_prefix(path, windows) - candidate_paths = sorted(self.segment_index_dir.glob(f"{prefix}__*.json.gz"), reverse=True) - if not candidate_paths: - return [] - try: - for candidate_path in candidate_paths: - with gzip.open(candidate_path, 
"rt", encoding="utf-8") as handle: - payload = json.load(handle) - # Handle new format with metadata - if isinstance(payload, dict): - return payload.get("segments", []) or [] - # Handle old format (list of segments) - if isinstance(payload, list): - return payload - except Exception: - logger.debug("Failed to load segment cache for %s", path, exc_info=True) - return [] - - def _save_segment_bank_to_disk(self, path: Path, windows: set, duration_limit: float, bank: List[Dict[str, Any]], metadata: Optional[Dict[str, Any]] = None) -> None: - cache_path = self._segment_index_cache_path(path, windows, duration_limit) - try: - payload: Dict[str, Any] = {"segments": bank} - if metadata: - payload["metadata"] = { - "file_name": metadata.get("file_name") or path.name, - "path": metadata.get("path") or str(path), - "roles": metadata.get("roles") or [], - "windows": sorted(float(w) for w in windows) if windows else [], - "duration_limit": float(duration_limit), - "indexed_at": time.time(), - } - with gzip.open(cache_path, "wt", encoding="utf-8") as handle: - json.dump(payload, handle) - except Exception: - logger.debug("Failed to save segment cache for %s", path, exc_info=True) - - def _load_vector_store_metadata(self) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, Dict[str, Any]]]: - by_path: Dict[str, Dict[str, Any]] = {} - by_name: Dict[str, Dict[str, Any]] = {} - metadata_path = self.vector_store_dir / "metadata.json" - if not metadata_path.exists(): - return by_path, by_name - - try: - payload = json.loads(metadata_path.read_text(encoding="utf-8")) - except Exception as exc: - logger.debug("No se pudo leer metadata del vector store: %s", exc) - return by_path, by_name - - for item in payload if isinstance(payload, list) else []: - if not isinstance(item, dict): - continue - file_name = str(item.get("filename", "") or "").strip().lower() - actual_path = self.library_dir / str(item.get("filename", "") or "") - if not actual_path.exists(): - actual_path = self.library_dir / 
Path(str(item.get("path", "") or "")).name - if not actual_path.exists(): - continue - normalized = str(actual_path.resolve()).lower() - normalized_item = dict(item) - normalized_item["resolved_path"] = str(actual_path) - by_path[normalized] = normalized_item - if file_name and file_name not in by_name: - by_name[file_name] = normalized_item - return by_path, by_name - - def _load_sample_index_metadata(self) -> Dict[str, Dict[str, Any]]: - if not self.sample_index_path.exists(): - return {} - - try: - payload = json.loads(self.sample_index_path.read_text(encoding="utf-8")) - except Exception as exc: - logger.debug("No se pudo leer sample index: %s", exc) - return {} - - entries = payload.get("samples", []) if isinstance(payload, dict) else [] - by_path: Dict[str, Dict[str, Any]] = {} - for item in entries if isinstance(entries, list) else []: - if not isinstance(item, dict): - continue - file_path = Path(str(item.get("path", "") or "")) - if not file_path.exists(): - continue - by_path[str(file_path.resolve()).lower()] = dict(item) - return by_path - - def _build_blocks(self, rms: np.ndarray, onset: np.ndarray, sr: int, - hop_length: int = 512, block_seconds: float = 8.0) -> List[Dict[str, float]]: - block_size = max(1, int(round(block_seconds * sr / hop_length))) - blocks: List[Dict[str, float]] = [] - for index in range(0, len(rms), block_size): - block_rms = rms[index:index + block_size] - block_onset = onset[index:index + block_size] - if len(block_rms) == 0: - continue - start = index * hop_length / sr - end = min(len(rms) * hop_length / sr, (index + block_size) * hop_length / sr) - blocks.append({ - "start": round(float(start), 3), - "end": round(float(end), 3), - "rms": round(float(np.mean(block_rms)), 6), - "onset": round(float(np.mean(block_onset)), 6), - "energy": round(float(np.mean(block_rms) * 0.65 + np.mean(block_onset) * 0.35), 6), - }) - return blocks - - def _vectorize_analysis(self, analysis: Dict[str, Any]) -> List[float]: - chroma = 
list(analysis.get("chroma", [0.0] * 12)) - return [ - float(analysis.get("tempo", 0.0)) / 180.0, - min(float(analysis.get("duration", 0.0)), 240.0) / 240.0, - float(analysis.get("rms_mean", 0.0)), - float(analysis.get("rms_std", 0.0)), - min(float(analysis.get("onset_mean", 0.0)), 8.0) / 8.0, - min(float(analysis.get("onset_std", 0.0)), 8.0) / 8.0, - min(float(analysis.get("spectral_centroid", 0.0)), 10000.0) / 10000.0, - min(float(analysis.get("spectral_rolloff", 0.0)), 14000.0) / 14000.0, - ] + chroma - - def _compute_audio_descriptor( - self, - y: np.ndarray, - sr: int, - tempo_hint: float = 0.0, - duration_hint: float = 0.0, - ) -> Dict[str, Any]: - y = np.asarray(y, dtype=np.float32) - if y.size == 0: - return { - "deep_vector": [0.0] * 53, - "harmonic_ratio": 0.5, - "percussive_ratio": 0.5, - "spectral_bandwidth": 0.0, - "spectral_bandwidth_std": 0.0, - "spectral_flatness": 0.0, - "spectral_flatness_std": 0.0, - "zero_crossing_rate": 0.0, - "zero_crossing_rate_std": 0.0, - "mfcc": [0.0] * 13, - "spectral_contrast": [0.0] * 7, - } - - if y.size < 512: - y = np.pad(y, (0, 512 - y.size)) - - hop_length = 256 if y.size < sr * 2 else 512 - n_fft = _adaptive_n_fft(len(y), default_n_fft=2048, min_n_fft=256) - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length) - rms = librosa.feature.rms(y=y, hop_length=hop_length)[0] - centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length)[0] - rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length)[0] - bandwidth = librosa.feature.spectral_bandwidth(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length)[0] - flatness = librosa.feature.spectral_flatness(y=y, n_fft=n_fft, hop_length=hop_length)[0] - zcr = librosa.feature.zero_crossing_rate(y, hop_length=hop_length)[0] - try: - chroma = librosa.feature.chroma_cqt(y=y, sr=sr) - except Exception: - chroma = 
librosa.feature.chroma_stft(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length) - mfcc = librosa.feature.mfcc(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mfcc=13) - contrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length) - - try: - harmonic, percussive = librosa.effects.hpss(y) - total_energy = float(np.sum(np.abs(y))) or 1.0 - harmonic_ratio = float(np.sum(np.abs(harmonic)) / total_energy) - percussive_ratio = float(np.sum(np.abs(percussive)) / total_energy) - except Exception: - harmonic_ratio = 0.5 - percussive_ratio = 0.5 - - chroma_avg = _normalize_chroma(np.mean(chroma, axis=1)) - mfcc_avg = np.mean(mfcc, axis=1) - contrast_avg = np.mean(contrast, axis=1) - duration = float(duration_hint or librosa.get_duration(y=y, sr=sr)) - - deep_vector = [ - min(float(tempo_hint or 0.0), 220.0) / 220.0, - min(duration, 240.0) / 240.0, - min(float(np.mean(rms)), 1.0), - min(float(np.std(rms)), 1.0), - min(float(np.mean(onset_env)), 8.0) / 8.0, - min(float(np.std(onset_env)), 8.0) / 8.0, - min(float(np.mean(centroid)), 12000.0) / 12000.0, - min(float(np.std(centroid)), 6000.0) / 6000.0, - min(float(np.mean(rolloff)), 16000.0) / 16000.0, - min(float(np.std(rolloff)), 8000.0) / 8000.0, - min(float(np.mean(bandwidth)), 8000.0) / 8000.0, - min(float(np.std(bandwidth)), 4000.0) / 4000.0, - min(float(np.mean(flatness)), 1.0), - min(float(np.std(flatness)), 1.0), - min(float(np.mean(zcr)), 1.0), - min(float(np.std(zcr)), 1.0), - min(max(harmonic_ratio, 0.0), 1.0), - min(max(percussive_ratio, 0.0), 1.0), - ] + [float(item) for item in chroma_avg.tolist()] \ - + [float(np.clip(item / 100.0, -1.0, 1.0)) for item in mfcc_avg.tolist()] \ - + [min(float(item), 80.0) / 80.0 for item in contrast_avg.tolist()] - - return { - "deep_vector": [round(float(item), 6) for item in deep_vector], - "harmonic_ratio": round(float(harmonic_ratio), 6), - "percussive_ratio": round(float(percussive_ratio), 6), - "spectral_bandwidth": round(float(np.mean(bandwidth)), 
3), - "spectral_bandwidth_std": round(float(np.std(bandwidth)), 3), - "spectral_flatness": round(float(np.mean(flatness)), 6), - "spectral_flatness_std": round(float(np.std(flatness)), 6), - "zero_crossing_rate": round(float(np.mean(zcr)), 6), - "zero_crossing_rate_std": round(float(np.std(zcr)), 6), - "mfcc": [round(float(item), 6) for item in mfcc_avg.tolist()], - "spectral_contrast": [round(float(item), 6) for item in contrast_avg.tolist()], - } - - def _section_kind_at_time(self, sections: List[Dict[str, Any]], seconds: float) -> str: - for section in sections: - start = float(section.get("start", 0.0) or 0.0) - end = float(section.get("end", start) or start) - if start <= seconds < end: - return str(section.get("kind", "verse") or "verse").lower() - return str(sections[-1].get("kind", "verse") if sections else "verse").lower() - - def _build_reference_segment_bank( - self, - reference_path: str, - reference: Dict[str, Any], - sections: List[Dict[str, Any]], - ) -> List[Dict[str, Any]]: - path = Path(reference_path) - fingerprint = self._fingerprint(path) - cache_key = f"segments::{self._cache_key(path)}::{fingerprint}" - cached = self._cache.get(cache_key) - if isinstance(cached, list) and cached: - return cached - - y, sr = librosa.load(str(path), sr=22050, mono=True) - duration = float(librosa.get_duration(y=y, sr=sr)) - tempo = float(reference.get("tempo", 0.0) or 0.0) - bank: List[Dict[str, Any]] = [] - - for window_seconds in (1.0, 2.0, 4.0, 8.0): - hop_seconds = max(0.25, window_seconds / 2.0) - cursor = 0.0 - while cursor + 0.25 <= duration: - end = min(duration, cursor + window_seconds) - start_sample = int(cursor * sr) - end_sample = max(start_sample + 256, int(end * sr)) - segment_audio = y[start_sample:end_sample] - if segment_audio.size < 256: - cursor += hop_seconds - continue - descriptor = self._compute_audio_descriptor( - segment_audio, - sr, - tempo_hint=tempo, - duration_hint=end - cursor, - ) - midpoint = cursor + ((end - cursor) / 2.0) - 
bank.append({ - "start": round(float(cursor), 3), - "end": round(float(end), 3), - "window_seconds": round(float(end - cursor), 3), - "kind": self._section_kind_at_time(sections, midpoint), - "vector": descriptor.get("deep_vector", []), - "rms_mean": descriptor.get("deep_vector", [0.0, 0.0, 0.0])[2] if descriptor.get("deep_vector") else 0.0, - "onset_mean": descriptor.get("deep_vector", [0.0] * 5)[4] if descriptor.get("deep_vector") else 0.0, - "spectral_centroid": round(float(descriptor.get("deep_vector", [0.0] * 7)[6] * 12000.0), 3) if descriptor.get("deep_vector") else 0.0, - "spectral_rolloff": round(float(descriptor.get("deep_vector", [0.0] * 9)[8] * 16000.0), 3) if descriptor.get("deep_vector") else 0.0, - "harmonic_ratio": descriptor.get("harmonic_ratio", 0.5), - "percussive_ratio": descriptor.get("percussive_ratio", 0.5), - "spectral_flatness": descriptor.get("spectral_flatness", 0.0), - "zero_crossing_rate": descriptor.get("zero_crossing_rate", 0.0), - }) - cursor += hop_seconds - - self._cache[cache_key] = bank - self._save_cache() - return bank - - def _build_candidate_segment_bank( - self, - candidate_path: str, - windows: set, - duration_limit: float = 32.0, - metadata: Optional[Dict[str, Any]] = None, - ) -> List[Dict[str, Any]]: - path = Path(candidate_path) - if not path.exists(): - return [] - - fingerprint = self._fingerprint(path) - windows_key = ",".join(str(item) for item in sorted(float(value) for value in windows)) or "full" - cache_key = f"candidate_segments::{self._cache_key(path)}::{fingerprint}::{windows_key}::{float(duration_limit):.3f}" - cached = self._cache.get(cache_key) - if isinstance(cached, list) and cached: - return cached - disk_cached = self._load_segment_bank_from_disk(path, windows, duration_limit) - if disk_cached: - self._cache[cache_key] = disk_cached - return disk_cached - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - y, sr = librosa.load(str(path), sr=22050, mono=True, duration=duration_limit) - 
- file_duration = float(librosa.get_duration(y=y, sr=sr)) - bank: List[Dict[str, Any]] = [] - - for window_seconds in sorted(float(value) for value in windows if float(value) > 0.0): - if file_duration <= 0.0: - continue - hop_seconds = max(0.25, window_seconds / 2.0) - cursor = 0.0 - while cursor + 0.25 <= file_duration: - end = min(file_duration, cursor + window_seconds) - start_sample = int(cursor * sr) - end_sample = max(start_sample + 256, int(end * sr)) - segment_audio = y[start_sample:end_sample] - if segment_audio.size < 256: - cursor += hop_seconds - continue - descriptor = self._compute_audio_descriptor( - segment_audio, - sr, - duration_hint=end - cursor, - ) - bank.append({ - "start": round(float(cursor), 3), - "end": round(float(end), 3), - "window_seconds": round(float(end - cursor), 3), - "vector": descriptor.get("deep_vector", []), - }) - cursor += hop_seconds - - self._cache[cache_key] = bank - self._save_segment_bank_to_disk(path, windows, duration_limit, bank, metadata=metadata) - self._save_cache() - return bank - - def analyze_file(self, file_path: str, duration_limit: Optional[float] = None) -> Dict[str, Any]: - if librosa is None: - raise RuntimeError("librosa no está disponible") - - path = Path(file_path) - cache_key = self._analysis_cache_key(path, duration_limit) - legacy_key = self._cache_key(path) - fingerprint = self._fingerprint(path) - cached = self._cache.get(cache_key) - if not isinstance(cached, dict) and duration_limit is None: - cached = self._cache.get(legacy_key) - if isinstance(cached, dict) and cached.get("fingerprint") == fingerprint: - return dict(cached["analysis"]) - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - y, sr = librosa.load(str(path), sr=22050, mono=True, duration=duration_limit) - hop_length = 512 - n_fft = _adaptive_n_fft(len(y)) - onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length) - rms = librosa.feature.rms(y=y, hop_length=hop_length)[0] - centroid = 
librosa.feature.spectral_centroid(y=y, sr=sr, n_fft=n_fft)[0] - rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, n_fft=n_fft)[0] - try: - chroma = librosa.feature.chroma_cqt(y=y, sr=sr) - except Exception: - chroma = librosa.feature.chroma_stft(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length) - chroma_avg = _normalize_chroma(np.mean(chroma, axis=1)) - key, key_score = _detect_key(chroma_avg) - tempo = _safe_float(librosa.feature.tempo(onset_envelope=onset_env, sr=sr, aggregate=np.median)) - - analysis = { - "path": str(path), - "file_name": path.name, - "duration": round(float(librosa.get_duration(y=y, sr=sr)), 3), - "tempo": round(float(tempo), 3), - "key": key, - "key_confidence": round(float(key_score), 6), - "rms_mean": round(float(np.mean(rms)), 6), - "rms_std": round(float(np.std(rms)), 6), - "onset_mean": round(float(np.mean(onset_env)), 6), - "onset_std": round(float(np.std(onset_env)), 6), - "spectral_centroid": round(float(np.mean(centroid)), 3), - "spectral_rolloff": round(float(np.mean(rolloff)), 3), - "chroma": [round(float(item), 6) for item in chroma_avg.tolist()], - "blocks": self._build_blocks(rms, onset_env, sr, hop_length=hop_length), - } - analysis["vector"] = self._vectorize_analysis(analysis) - analysis.update(self._compute_audio_descriptor( - y, - sr, - tempo_hint=float(analysis.get("tempo", 0.0) or 0.0), - duration_hint=float(analysis.get("duration", 0.0) or 0.0), - )) - - self._cache[cache_key] = { - "fingerprint": fingerprint, - "analysis": analysis, - } - if duration_limit is None: - self._cache[legacy_key] = self._cache[cache_key] - self._save_cache() - return dict(analysis) - - def analyze_reference(self, reference_path: str) -> Dict[str, Any]: - analysis = self.analyze_file(reference_path) - energies = [float(block.get("energy", 0.0)) for block in analysis.get("blocks", [])] - if energies: - max_energy = max(energies) or 1.0 - for block in analysis["blocks"]: - block["energy_norm"] = round(float(block["energy"]) / max_energy, 6) - 
analysis["device"] = self.device_name - return analysis - - def _is_excluded_full_track(self, path: Path, sample_meta: Optional[Dict[str, Any]], vector_meta: Optional[Dict[str, Any]]) -> bool: - vector_type = str((vector_meta or {}).get("type", "") or "").lower() - if vector_type == "full_track": - return True - - duration_estimate = float((vector_meta or {}).get("duration_estimate", 0.0) or 0.0) - category = str((sample_meta or {}).get("category", "") or "").lower() - if path.suffix.lower() == ".mp3" and duration_estimate >= 45.0: - return True - - if path.suffix.lower() == ".mp3" and path.exists(): - size_bytes = int((vector_meta or {}).get("size_bytes", (sample_meta or {}).get("size", 0)) or 0) - if size_bytes >= 5_000_000 and category in {"unknown", "loop"}: - return True - return False - - def _duration_estimate( - self, - path: Path, - sample_meta: Optional[Dict[str, Any]], - vector_meta: Optional[Dict[str, Any]], - ) -> float: - duration_estimate = float((vector_meta or {}).get("duration_estimate", 0.0) or 0.0) - if duration_estimate > 0.0: - return duration_estimate - sample_duration = float((sample_meta or {}).get("duration", 0.0) or 0.0) - if sample_duration > 0.0: - return sample_duration - sample_size = int((sample_meta or {}).get("size", 0) or 0) - if sample_size > 0: - return min(32.0, max(0.1, sample_size / 176400.0)) - try: - return min(32.0, max(0.1, path.stat().st_size / 176400.0)) - except Exception: - return 0.0 - - def _catalog_role_match( - self, - role: str, - path: Path, - sample_meta: Optional[Dict[str, Any]], - vector_meta: Optional[Dict[str, Any]], - ) -> bool: - if self._is_excluded_full_track(path, sample_meta, vector_meta): - return False - - name = path.name - stem = path.stem.lower() - name_match = self._matches_role_name(role, name) - - category = str((sample_meta or {}).get("category", "") or "").lower() - vector_type = str((vector_meta or {}).get("type", "") or "").lower() - duration_estimate = self._duration_estimate(path, 
sample_meta, vector_meta) - - role_categories = { - "kick": {"kick"}, - "snare": {"snare", "clap"}, - "hat": {"hat"}, - "bass_loop": {"bass"}, - "perc_loop": {"perc"}, - "top_loop": {"loop"}, - "synth_loop": {"synth"}, - "vocal_loop": {"vocal"}, - "crash_fx": {"fx"}, - "fill_fx": {"fx"}, - "snare_roll": {"fx"}, - "atmos_fx": {"fx", "synth"}, - "vocal_shot": {"vocal"}, - } - role_types = ROLE_VECTOR_TYPES.get(role, set()) - min_dur, max_dur = ROLE_DURATION_WINDOWS.get(role, (0.0, 999.0)) - duration_ok = duration_estimate <= 0.0 or (min_dur <= duration_estimate <= max_dur) - loopish_name = self._name_contains_any(stem, ("loop", "groove", "full drum", "full mix", "drum loop", "top loop")) - - if name_match: - return duration_ok - if role in {"kick", "snare", "hat"}: - return bool(category and category in role_categories.get(role, set()) and duration_ok) - if role == "bass_loop": - if category == "bass" and duration_ok: - return True - if vector_type and vector_type in role_types and duration_ok and self._name_contains_none(stem, ("drum loop", "full mix", "top loop", "vocal")): - return True - return False - if role == "perc_loop": - if category == "perc" and duration_ok and loopish_name: - return True - if vector_type and vector_type in role_types and duration_ok and loopish_name: - return True - return False - if role == "top_loop": - if category == "loop" and duration_ok and loopish_name and self._name_contains_none(stem, ("bass loop", "vocal", "synth loop")): - return True - if vector_type and vector_type in role_types and duration_ok and loopish_name: - return True - return False - if role == "synth_loop": - synthish_name = self._name_contains_any(stem, ("synth", "lead", "hook", "pluck", "pad", "chord", "arp", "melod")) - if category == "synth" and duration_ok and synthish_name: - return True - if vector_type and vector_type in role_types and duration_ok and synthish_name: - return True - return False - if role == "vocal_loop": - vocalish_loop = 
self._name_contains_any(stem, ("vocal loop", "vox", "acapella", "chant", "phrase", "vocal")) - if category == "vocal" and duration_ok and vocalish_loop and self._name_contains_none(stem, ("one shot", "shot", "importante", "stab", "hit")): - return True - if vector_type and vector_type in role_types and duration_ok and vocalish_loop and self._name_contains_none(stem, ("one shot", "shot", "importante", "stab", "hit")): - return True - return False - if role == "crash_fx": - return False - if role == "fill_fx": - if category == "fx" and duration_ok and self._name_contains_any(stem, ("fill", "transition", "tom loop", "drum fill", "break fill")): - return True - return False - if role == "snare_roll": - if category == "fx" and duration_ok and self._name_contains_any(stem, ("snareroll", "snare roll", "roll", "buildup")): - return True - return False - if role == "atmos_fx": - atmosish_name = self._name_contains_any(stem, ("atmos", "drone", "ambient", "noise", "texture", "downfilter", "sweep", "wash")) - if category in role_categories.get(role, set()) and duration_ok and atmosish_name: - return True - if vector_type and vector_type in role_types and duration_ok and atmosish_name: - return True - return False - if role == "vocal_shot": - if category == "vocal" and duration_ok and duration_estimate <= 3.0: - return True - if vector_type in role_types and duration_ok and duration_estimate <= 3.0: - return True - return False - return False - - def prewarm_library_matching_cache( - self, - roles: Optional[List[str]] = None, - max_files: Optional[int] = None, - duration_limit: float = 32.0, - ) -> Dict[str, Any]: - target_roles = [role for role in (roles or list(self.ROLE_PATTERNS.keys())) if role in self.ROLE_PATTERNS] - assets = self._list_assets() - windows_by_role = { - role: set(ROLE_SEGMENT_SETTINGS.get(role, {}).get("windows", set()) or set()) - for role in target_roles - } - - files: Dict[str, Tuple[Path, set]] = {} - for role in target_roles: - for file_path in 
assets.get(role, []): - normalized = str(file_path.resolve()).lower() - if normalized not in files: - files[normalized] = (file_path, set()) - files[normalized][1].update(windows_by_role.get(role, set())) - - ordered_files = list(files.values()) - if max_files is not None: - ordered_files = ordered_files[: max(0, int(max_files))] - - analyzed = 0 - segmented = 0 - errors = 0 - for file_path, windows in ordered_files: - try: - analysis = self.analyze_file(str(file_path), duration_limit=duration_limit) - analyzed += 1 - if float(analysis.get("duration", 0.0) or 0.0) > max(windows or {4.0}) * 1.5: - self._build_candidate_segment_bank(str(file_path), windows or {4.0}, duration_limit=duration_limit) - segmented += 1 - except Exception: - errors += 1 - - return { - "roles": target_roles, - "files_considered": len(ordered_files), - "analyzed": analyzed, - "segmented": segmented, - "errors": errors, - "cache_path": str(self.cache_path), - "device": self.device_name, - } - - def build_segment_rag_index( - self, - roles: Optional[List[str]] = None, - max_files: Optional[int] = None, - duration_limit: float = 32.0, - force: bool = False, - offset: int = 0, - batch_size: Optional[int] = None, - resume: bool = False, - ) -> Dict[str, Any]: - target_roles = [role for role in (roles or list(self.ROLE_PATTERNS.keys())) if role in self.ROLE_PATTERNS] - assets = self._list_assets() - files: Dict[str, Dict[str, Any]] = {} - - for role in target_roles: - for file_path in assets.get(role, []): - normalized = str(file_path.resolve()).lower() - if normalized not in files: - files[normalized] = { - "path": file_path, - "roles": set(), - "windows": set(), - } - files[normalized]["roles"].add(role) - files[normalized]["windows"].update(ROLE_SEGMENT_SETTINGS.get(role, {}).get("windows", set()) or set()) - - ordered_files = sorted( - files.values(), - key=lambda item: ( - -len(item["roles"]), - -sum(float(value) for value in item["windows"]), - item["path"].name.lower(), - ), - ) - - state = 
self._load_segment_rag_state() - indexed_entries = dict(state.get("indexed_entries", {}) or {}) - if resume: - indexed_paths = set(state.get("indexed_paths", []) or []) - if indexed_paths: - before_resume = len(ordered_files) - ordered_files = [ - entry for entry in ordered_files - if str(entry["path"].resolve()).lower() not in indexed_paths - ] - logger.info( - "Resume mode: skipped %d already indexed files, %d remaining", - before_resume - len(ordered_files), - len(ordered_files), - ) - - total_available = len(ordered_files) - if offset > 0: - ordered_files = ordered_files[offset:] - - limit = batch_size if batch_size is not None else max_files - if limit is not None: - ordered_files = ordered_files[: max(0, int(limit))] - - files_remaining = max(0, total_available - offset - len(ordered_files)) - - built = 0 - reused = 0 - skipped = 0 - errors = 0 - total_segments = 0 - manifest: List[Dict[str, Any]] = [] - - for entry in ordered_files: - path = entry["path"] - windows = entry["windows"] or {4.0} - normalized = str(path.resolve()).lower() - sample_meta = self._sample_index_by_path.get(normalized) - vector_meta = self._vector_store_meta_by_path.get(normalized) - estimated_duration = self._duration_estimate(path, sample_meta, vector_meta) - effective_duration_limit = min(max(estimated_duration, 0.5), duration_limit) if estimated_duration > 0.0 else duration_limit - disk_cached = self._load_segment_bank_from_disk(path, windows, effective_duration_limit) - cache_prefix = self._segment_index_cache_prefix(path, windows) - if disk_cached and not force: - reused += 1 - total_segments += len(disk_cached) - entry_report = { - "file_name": path.name, - "path": str(path), - "roles": sorted(entry["roles"]), - "segments": len(disk_cached), - "cached": True, - "cache_prefix": cache_prefix, - } - manifest.append(entry_report) - indexed_entries[normalized] = entry_report - continue - try: - analysis = self.analyze_file(str(path), duration_limit=duration_limit) - duration = 
float(analysis.get("duration", 0.0) or 0.0) - if duration < 0.5: - skipped += 1 - continue - segment_metadata = { - "file_name": path.name, - "path": str(path), - "roles": sorted(entry["roles"]), - } - bank = self._build_candidate_segment_bank(str(path), windows, duration_limit=min(max(duration, 0.5), duration_limit), metadata=segment_metadata) - built += 1 - total_segments += len(bank) - entry_report = { - "file_name": path.name, - "path": str(path), - "roles": sorted(entry["roles"]), - "segments": len(bank), - "cached": False, - "cache_prefix": cache_prefix, - } - manifest.append(entry_report) - indexed_entries[normalized] = entry_report - - if (built + reused) % 10 == 0: - periodic_state = { - "indexed_files": [item["file_name"] for item in indexed_entries.values()], - "indexed_paths": list(indexed_entries.keys()), - "indexed_entries": indexed_entries, - "last_offset": offset + (built + reused), - "total_processed": len(indexed_entries), - "timestamp": time.time(), - } - self._save_segment_rag_state(periodic_state) - logger.debug("Saved segment RAG state after %d processed files", built + reused) - except Exception: - errors += 1 - logger.debug("Failed to build segment index for %s", path, exc_info=True) - - final_state = { - "indexed_files": [item["file_name"] for item in indexed_entries.values()], - "indexed_paths": list(indexed_entries.keys()), - "indexed_entries": indexed_entries, - "last_offset": offset + (built + reused), - "total_processed": len(indexed_entries), - "timestamp": time.time(), - "complete": files_remaining == 0, - } - self._save_segment_rag_state(final_state) - - return { - "roles": target_roles, - "files_targeted": len(ordered_files), - "total_available": total_available, - "built": built, - "reused": reused, - "skipped": skipped, - "errors": errors, - "total_segments": total_segments, - "segment_index_dir": str(self.segment_index_dir), - "device": self.device_name, - "manifest": manifest, - "offset": offset, - "batch_size": batch_size, - 
"files_remaining": files_remaining, - "resumed": resume, - } - - def _list_assets(self) -> Dict[str, List[Path]]: - assets: Dict[str, List[Path]] = {role: [] for role in self.ROLE_PATTERNS} - if not self.library_dir.exists(): - return assets - - for role, patterns in self.ROLE_PATTERNS.items(): - seen = set() - indexed_paths = set(self._sample_index_by_path.keys()) | set(self._vector_store_meta_by_path.keys()) - - for normalized_path in sorted(indexed_paths): - path = Path(normalized_path) - if not path.exists() or not path.is_file(): - continue - if path.suffix.lower() not in {'.wav', '.aif', '.aiff', '.mp3'}: - continue - sample_meta = self._sample_index_by_path.get(normalized_path) - vector_meta = self._vector_store_meta_by_path.get(normalized_path) - if not self._catalog_role_match(role, path, sample_meta, vector_meta): - continue - if normalized_path in seen: - continue - seen.add(normalized_path) - assets[role].append(path) - - for pattern in patterns: - for match in sorted(self.library_dir.glob(pattern)): - if match.is_file() and match.suffix.lower() in {'.wav', '.aif', '.aiff', '.mp3'}: - normalized_match = str(match.resolve()).lower() - sample_meta = self._sample_index_by_path.get(normalized_match) - vector_meta = self._vector_store_meta_by_path.get(normalized_match) - if not self._catalog_role_match(role, match, sample_meta, vector_meta): - continue - if normalized_match in seen: - continue - seen.add(normalized_match) - assets[role].append(match) - return assets - - def _apply_role_exclusions(self, role: str, file_name: str) -> Tuple[bool, str]: - """ - Aplica exclusiones fuertes por rol. 
def _validate_role_requirement(self, role: str, item: Dict[str, Any]) -> Tuple[bool, float, str]:
    """Validate that a candidate sample meets basic role requirements.

    Checks, in order: the role's duration window, loop-vs-one-shot
    heuristics, and the presence of a role keyword in the file name.

    Returns:
        (passes, score_modifier, reason) - pass/fail flag, a 0-1 score
        modifier for downstream ranking, and a human-readable reason.
    """
    role_lower = role.lower()
    file_name = str(item.get("file_name", "") or "").lower()
    duration = float(item.get("duration", 0.0) or 0.0)

    min_dur, max_dur = ROLE_DURATION_WINDOWS.get(role_lower, (0.0, 999.0))

    # duration <= 0 means "unknown": only enforce the window when known.
    if duration > 0.0 and not (min_dur <= duration <= max_dur):
        return False, 0.0, f"duration {duration:.1f}s outside range [{min_dur}, {max_dur}] for role {role}"

    if role_lower in {'kick', 'snare', 'hat', 'clap', 'hat_closed', 'hat_open'}:
        if 'loop' in file_name and 'full' not in file_name:
            if duration > 4.0:
                return False, 0.3, f"one-shot role {role} has loop-like file (duration={duration:.1f}s)"

    if role_lower in {'bass_loop', 'vocal_loop', 'top_loop', 'synth_loop'}:
        # Bug fix: the original rejected duration == 0.0 here, although the
        # window check above deliberately treats non-positive durations as
        # "unknown". Only reject when a known duration is truly too short.
        if 0.0 < duration < 1.0:
            return False, 0.2, f"loop role {role} has very short duration ({duration:.1f}s)"

    # Role keyword whitelist; missing keywords soften the score rather
    # than rejecting outright.
    must_contain = {
        'kick': ['kick', 'bd', 'bass_drum', '808'],
        'snare': ['snare', 'snr', 'sd', 'rim'],
        'clap': ['clap', 'clp', 'hand'],
        'hat': ['hat', 'hh', 'hihat', 'cymbal'],
        'bass_loop': ['bass', 'sub', 'reese', '808', 'bassline'],
        'vocal_loop': ['vocal', 'vox', 'voice', 'chant', 'acapella'],
        'top_loop': ['top', 'perc', 'drum', 'full'],
        'synth_loop': ['synth', 'lead', 'pad', 'chord', 'arp', 'pluck'],
        'crash_fx': ['crash', 'cymbal', 'impact', 'ride'],
        'fill_fx': ['fill', 'transition', 'tom'],
        'snare_roll': ['roll', 'snare', 'build'],
        'atmos_fx': ['atmos', 'drone', 'ambient', 'texture', 'noise'],
        'vocal_shot': ['vocal', 'vox', 'shot', 'chop', 'stab'],
    }

    if role_lower in must_contain:
        found = any(kw in file_name for kw in must_contain[role_lower])
        if not found:
            return True, 0.65, f"no role keyword for {role}"

    return True, 1.0, "passes role validation"
def _matches_role_name(self, role: str, file_name: str) -> bool:
    """Heuristic filename filter: does this file plausibly belong to *role*?

    Hard exclusion patterns are applied first; roles without a dedicated
    rule always pass.
    """
    stem = Path(file_name).stem.lower()
    rejected, why = self._apply_role_exclusions(role, stem)
    if rejected:
        logger.debug("ROLE_EXCLUSION: %s", why)
        return False

    if role == 'kick':
        return 'kick' in stem and 'loop' not in stem
    elif role == 'snare':
        return ('snare' in stem or 'clap' in stem) and 'roll' not in stem and 'loop' not in stem
    elif role == 'hat':
        return 'hat' in stem and 'loop' not in stem and 'full mix' not in stem
    elif role == 'bass_loop':
        has_bass = self._name_contains_any(stem, ('bass loop', 'bass_loop', 'bassline', 'sub bass', 'sub_bass', 'reese', '808'))
        return has_bass and self._name_contains_none(stem, ('drum loop', 'full mix', 'top loop', 'vocal'))
    elif role == 'perc_loop':
        looks_perc = (
            self._name_contains_any(stem, ('perc loop', 'perc_loop', 'percussion loop', 'drum loop', 'drum_loop', 'groove'))
            or ('perc' in stem and 'loop' in stem)
            or (self._name_contains_any(stem, ('shaker', 'bongo', 'conga', 'timbale')) and 'loop' in stem)
        )
        return looks_perc and self._name_contains_none(stem, ('full mix', 'one shot', 'shot', 'vocal'))
    elif role == 'top_loop':
        has_top = self._name_contains_any(stem, ('top loop', 'top_loop', 'top loops', 'full drum', 'full mix', 'drum loop', 'drum_loop'))
        return has_top and self._name_contains_none(stem, ('bass loop', 'vocal', 'synth loop'))
    elif role == 'synth_loop':
        looks_synth = (
            self._name_contains_any(stem, ('synth loop', 'synth_loop', 'lead loop', 'lead_loop', 'hook', 'melody loop', 'melodic loop'))
            or ('synth' in stem and 'loop' in stem)
            or (self._name_contains_any(stem, ('chord', 'pad', 'pluck', 'arp')) and 'loop' in stem)
        )
        return looks_synth and self._name_contains_none(stem, ('drum loop', 'full mix', 'vocal'))
    elif role == 'vocal_loop':
        looks_vocal = (
            self._name_contains_any(stem, ('vocal loop', 'vox loop', 'vox_', 'acapella', 'chant loop'))
            or ('vocal' in stem and 'loop' in stem)
        )
        return looks_vocal and self._name_contains_none(stem, ('one shot', 'shot', 'importante', 'stab', 'hit'))
    elif role == 'crash_fx':
        return self._name_contains_any(stem, ('crash', 'cymbal', 'riser', 'downlifter', 'sweep', 'uplifter')) or ' impact ' in f" {stem} "
    elif role == 'fill_fx':
        return self._name_contains_any(stem, ('fill', 'transition', 'tom loop', 'drum fill', 'break fill'))
    elif role == 'snare_roll':
        return self._name_contains_any(stem, ('snareroll', 'snare roll', 'roll', 'buildup')) and 'one shot' not in stem
    elif role == 'atmos_fx':
        return self._name_contains_any(stem, ('atmos', 'drone', 'ambient', 'noise', 'texture', 'downfilter', 'wash', 'sweep'))
    elif role == 'vocal_shot':
        return self._name_contains_any(stem, ('vocal one shot', 'one shot', 'shot', 'importante', 'vocal chop', 'vocal stab'))
    # Unknown roles are not filtered by name.
    return True
name) - or (self._name_contains_any(name, ('chord', 'pad', 'pluck', 'arp')) and 'loop' in name) - ) and self._name_contains_none(name, ('drum loop', 'full mix', 'vocal')) - if role == 'vocal_loop': - return ( - self._name_contains_any(name, ('vocal loop', 'vox loop', 'vox_', 'acapella', 'chant loop')) - or ('vocal' in name and 'loop' in name) - ) and self._name_contains_none(name, ('one shot', 'shot', 'importante', 'stab', 'hit')) - if role == 'crash_fx': - return self._name_contains_any(name, ('crash', 'cymbal', 'riser', 'downlifter', 'sweep', 'uplifter')) or ' impact ' in f" {name} " - if role == 'fill_fx': - return self._name_contains_any(name, ('fill', 'transition', 'tom loop', 'drum fill', 'break fill')) - if role == 'snare_roll': - return self._name_contains_any(name, ('snareroll', 'snare roll', 'roll', 'buildup')) and 'one shot' not in name - if role == 'atmos_fx': - return self._name_contains_any(name, ('atmos', 'drone', 'ambient', 'noise', 'texture', 'downfilter', 'wash', 'sweep')) - if role == 'vocal_shot': - return self._name_contains_any(name, ('vocal one shot', 'one shot', 'shot', 'importante', 'vocal chop', 'vocal stab')) - return True - - def _cosine_scores(self, reference_vector: List[float], candidate_vectors: List[List[float]]) -> List[float]: - if not candidate_vectors: - return [] - - ref = np.asarray(reference_vector, dtype=np.float32) - candidates = np.asarray(candidate_vectors, dtype=np.float32) - if torch is None or self.device is None or F is None: - ref_norm = np.linalg.norm(ref) or 1.0 - cand_norm = np.linalg.norm(candidates, axis=1) - cand_norm[cand_norm == 0] = 1.0 - return (candidates @ ref / (cand_norm * ref_norm)).astype(float).tolist() - - ref_tensor = torch.tensor(ref, dtype=torch.float32, device=self.device) - candidate_tensor = torch.tensor(candidates, dtype=torch.float32, device=self.device) - scores = F.cosine_similarity(candidate_tensor, ref_tensor.unsqueeze(0), dim=1) - return 
scores.detach().cpu().numpy().astype(float).tolist() - - def _cosine_matrix(self, left_vectors: List[List[float]], right_vectors: List[List[float]]) -> np.ndarray: - if not left_vectors or not right_vectors: - return np.zeros((0, 0), dtype=np.float32) - - left = np.asarray(left_vectors, dtype=np.float32) - right = np.asarray(right_vectors, dtype=np.float32) - - if torch is None or self.device is None or F is None: - left_norm = np.linalg.norm(left, axis=1, keepdims=True) - right_norm = np.linalg.norm(right, axis=1, keepdims=True) - left_norm[left_norm == 0] = 1.0 - right_norm[right_norm == 0] = 1.0 - return (left / left_norm) @ (right / right_norm).T - - left_tensor = torch.tensor(left, dtype=torch.float32, device=self.device) - right_tensor = torch.tensor(right, dtype=torch.float32, device=self.device) - left_tensor = F.normalize(left_tensor, p=2, dim=1) - right_tensor = F.normalize(right_tensor, p=2, dim=1) - return (left_tensor @ right_tensor.T).detach().cpu().numpy().astype(np.float32) - - def _tempo_score(self, candidate_tempo: float, reference_tempo: float) -> float: - if candidate_tempo <= 0 or reference_tempo <= 0: - return 0.5 - variants = [ - candidate_tempo, - candidate_tempo * 2.0, - candidate_tempo / 2.0, - candidate_tempo * 4.0, - candidate_tempo / 4.0, - ] - diff = min(abs(item - reference_tempo) for item in variants) - return math.exp(-diff / 10.0) - - def _vector_store_entry(self, candidate: Dict[str, Any]) -> Optional[Dict[str, Any]]: - path_key = str(candidate.get("path", "") or "").strip().lower() - if path_key and path_key in self._vector_store_meta_by_path: - return self._vector_store_meta_by_path[path_key] - file_name = str(candidate.get("file_name", "") or Path(path_key).name).strip().lower() - if file_name and file_name in self._vector_store_meta_by_name: - return self._vector_store_meta_by_name[file_name] - return None - - def _role_segment_relevance(self, role: str, segment: Dict[str, Any], reference: Dict[str, Any]) -> float: - kind = 
str(segment.get("kind", "verse") or "verse").lower() - centroid = float(segment.get("spectral_centroid", 0.0) or 0.0) - onset = float(segment.get("onset_mean", 0.0) or 0.0) - harmonic = float(segment.get("harmonic_ratio", 0.5) or 0.5) - percussive = float(segment.get("percussive_ratio", 0.5) or 0.5) - flatness = float(segment.get("spectral_flatness", 0.0) or 0.0) - zcr = float(segment.get("zero_crossing_rate", 0.0) or 0.0) - rms = float(segment.get("rms_mean", 0.5) or 0.5) - score = 0.0 - - if role == 'kick': - transient = min(1.0, onset / 3.0) - low_centroid = max(0.0, 1.0 - (centroid / 3000.0)) - score = transient * 0.35 + percussive * 0.30 + low_centroid * 0.20 + rms * 0.15 - elif role == 'snare': - transient = min(1.0, onset / 4.5) - mid_centroid = min(1.0, max(0.0, (centroid - 800) / 4000.0)) - score = transient * 0.32 + percussive * 0.28 + mid_centroid * 0.25 - elif role == 'hat': - high_centroid = min(1.0, centroid / 10000.0) - transient = min(1.0, onset / 4.0) - score = high_centroid * 0.38 + transient * 0.32 + zcr * 0.15 + percussive * 0.15 - elif role == 'bass_loop': - low_centroid = max(0.0, 1.0 - (centroid / 2200.0)) - harmonic_content = harmonic * 0.35 - low_flat = max(0.0, 1.0 - flatness * 1.5) - score = harmonic_content + low_centroid * 0.30 + low_flat * 0.20 + rms * 0.15 - elif role in {'perc_loop', 'top_loop'}: - transient = min(1.0, onset / 4.0) - mid_high_centroid = min(1.0, max(0.0, centroid / 8500.0)) - score = transient * 0.35 + percussive * 0.30 + mid_high_centroid * 0.20 + rms * 0.15 - elif role == 'synth_loop': - harmonic_content = harmonic * 0.38 - mid_centroid = min(1.0, max(0.0, (centroid - 500) / 7000.0)) - low_flat = max(0.0, 1.0 - flatness * 1.2) - score = harmonic_content + mid_centroid * 0.22 + low_flat * 0.25 - elif role == 'vocal_loop': - harmonic_content = harmonic * 0.32 - mid_centroid = min(1.0, max(0.0, (centroid - 200) / 4000.0)) - low_flat = max(0.0, 1.0 - flatness * 1.5) - score = harmonic_content + mid_centroid * 0.18 + 
low_flat * 0.25 + rms * 0.25 - elif role == 'crash_fx': - high_centroid = min(1.0, centroid / 12000.0) - transient = min(1.0, onset / 3.5) - high_flat = min(1.0, flatness * 2.5) - score = high_centroid * 0.30 + transient * 0.25 + high_flat * 0.25 - elif role == 'fill_fx': - transient = min(1.0, onset / 4.0) - percussive_content = percussive * 0.35 - mid_centroid = min(1.0, max(0.0, centroid / 7000.0)) - score = transient * 0.30 + percussive_content + mid_centroid * 0.20 - elif role == 'snare_roll': - transient = min(1.0, onset / 4.5) - percussive_content = percussive * 0.38 - mid_centroid = min(1.0, max(0.0, (centroid - 1000) / 5000.0)) - score = transient * 0.35 + percussive_content + mid_centroid * 0.15 - elif role == 'atmos_fx': - harmonic_content = harmonic * 0.28 - low_onset = max(0.0, 1.0 - onset * 2.0) - high_flat = min(1.0, flatness * 2.0) - score = harmonic_content + low_onset * 0.22 + high_flat * 0.25 + rms * 0.25 - elif role == 'vocal_shot': - harmonic_content = harmonic * 0.30 - transient = min(1.0, onset / 4.0) - mid_centroid = min(1.0, max(0.0, (centroid - 300) / 4500.0)) - score = harmonic_content + transient * 0.22 + mid_centroid * 0.28 - - section_bonus_map = { - 'kick': {'intro': 0.04, 'verse': 0.08, 'build': 0.12, 'drop': 0.18, 'break': -0.08, 'outro': 0.02}, - 'snare': {'intro': -0.06, 'verse': 0.06, 'build': 0.10, 'drop': 0.14, 'break': 0.03, 'outro': -0.04}, - 'hat': {'intro': 0.06, 'verse': 0.08, 'build': 0.14, 'drop': 0.12, 'break': -0.04, 'outro': 0.02}, - 'bass_loop': {'intro': -0.12, 'verse': 0.06, 'build': 0.12, 'drop': 0.20, 'break': -0.10, 'outro': -0.06}, - 'perc_loop': {'intro': 0.02, 'verse': 0.08, 'build': 0.14, 'drop': 0.18, 'break': 0.06, 'outro': 0.00}, - 'top_loop': {'intro': 0.04, 'verse': 0.08, 'build': 0.16, 'drop': 0.18, 'break': 0.02, 'outro': 0.00}, - 'synth_loop': {'intro': 0.06, 'verse': 0.04, 'build': 0.14, 'drop': 0.20, 'break': 0.12, 'outro': 0.02}, - 'vocal_loop': {'intro': -0.06, 'verse': 0.14, 'build': 0.08, 
'drop': 0.16, 'break': 0.10, 'outro': -0.02}, - 'crash_fx': {'intro': 0.10, 'verse': 0.02, 'build': 0.16, 'drop': 0.10, 'break': -0.06, 'outro': 0.10}, - 'fill_fx': {'intro': 0.02, 'verse': 0.04, 'build': 0.20, 'drop': 0.12, 'break': 0.10, 'outro': 0.02}, - 'snare_roll': {'intro': -0.08, 'verse': 0.02, 'build': 0.26, 'drop': 0.14, 'break': 0.06, 'outro': -0.10}, - 'atmos_fx': {'intro': 0.22, 'verse': 0.04, 'build': 0.02, 'drop': -0.06, 'break': 0.24, 'outro': 0.18}, - 'vocal_shot': {'intro': -0.06, 'verse': 0.10, 'build': 0.12, 'drop': 0.16, 'break': 0.08, 'outro': -0.04}, - } - score += section_bonus_map.get(role, {}).get(kind, 0.0) - return max(0.0, min(1.0, score)) - - def _select_role_reference_segments( - self, - role: str, - reference: Dict[str, Any], - segment_bank: List[Dict[str, Any]], - ) -> List[Dict[str, Any]]: - if not segment_bank: - return [] - settings = ROLE_SEGMENT_SETTINGS.get(role, {}) - allowed_windows = settings.get("windows", set()) - allowed_kinds = settings.get("section_kinds", set()) - filtered = [ - segment for segment in segment_bank - if (not allowed_windows or round(float(segment.get("window_seconds", 0.0)), 1) in allowed_windows) - and (not allowed_kinds or str(segment.get("kind", "")).lower() in allowed_kinds) - ] - if not filtered: - filtered = segment_bank - ranked = sorted( - filtered, - key=lambda item: self._role_segment_relevance(role, item, reference), - reverse=True, - ) - return ranked[:int(settings.get("top_k", 6) or 6)] - - def _role_segment_similarity( - self, - role: str, - candidate: Dict[str, Any], - role_segments: List[Dict[str, Any]], - ) -> float: - role_vectors = [list(segment.get("vector", []) or []) for segment in role_segments if segment.get("vector")] - if not role_vectors: - return 0.0 - - candidate_vectors: List[List[float]] = [] - candidate_vector = list(candidate.get("deep_vector", []) or []) - if candidate_vector: - candidate_vectors.append(candidate_vector) - - candidate_path = str(candidate.get("path", 
def _vector_store_role_score(self, role: str, candidate: Dict[str, Any], reference: Dict[str, Any]) -> float:
    """Score a candidate for *role* using its vector-store catalog metadata.

    Blends type match (0.34), duration fit (0.28), tag/naming fit (0.26)
    and tempo compatibility (0.12); candidates without a catalog entry
    get a neutral 0.5. Returns a value clamped to [0, 1].
    """
    entry = self._vector_store_entry(candidate)
    if not entry:
        return 0.5

    entry_type = str(entry.get("type", "") or "").lower()
    duration = float(entry.get("duration_estimate", candidate.get("duration", 0.0)) or 0.0)
    tags = [str(tag).lower() for tag in entry.get("tags", []) if tag]
    file_name = str(candidate.get("file_name", entry.get("filename", "")) or "").lower()

    # Untyped entries get a mild 0.6; a declared type either matches the
    # role's allowed types (1.0) or counts against it (0.35).
    type_score = 0.6 if not entry_type else (1.0 if entry_type in ROLE_VECTOR_TYPES.get(role, set()) else 0.35)
    duration_score = self._duration_score(role, duration, file_name)
    tag_score = self._naming_score(role, " ".join(tags + [file_name]))
    tempo_score = self._tempo_score(float(entry.get("bpm", candidate.get("tempo", 0.0)) or 0.0), float(reference.get("tempo", 0.0) or 0.0))
    score = type_score * 0.34 + duration_score * 0.28 + tag_score * 0.26 + tempo_score * 0.12
    # Crash role strongly penalizes files that look like hats/tops/snares.
    if role == 'crash_fx' and any(marker in file_name for marker in ['top loop', 'top loops', 'hat', 'snare']):
        score *= 0.25
    return max(0.0, min(1.0, score))

def _role_score(
    self,
    role: str,
    reference: Dict[str, Any],
    candidate: Dict[str, Any],
    cosine_score: float,
    segment_score: float = 0.0,
    catalog_score: float = 0.5,
) -> float:
    """Blend similarity, tempo, key, duration, naming and spectral cues
    into a single per-role candidate score.

    When ``segment_score`` is present it is folded into ``cosine_score``
    with a role-dependent mix; ``catalog_score`` scales the final value
    into a 0.82x-1.06x band.
    """
    if segment_score > 0:
        # One-shot/FX roles trust segment similarity more than whole-file cosine.
        if role in {'kick', 'snare', 'hat', 'crash_fx', 'fill_fx', 'snare_roll', 'vocal_shot'}:
            cosine_score = (float(cosine_score) * 0.28) + (float(segment_score) * 0.72)
        elif role in {'bass_loop', 'perc_loop', 'top_loop', 'synth_loop', 'vocal_loop', 'atmos_fx'}:
            cosine_score = (float(cosine_score) * 0.42) + (float(segment_score) * 0.58)
        else:
            cosine_score = (float(cosine_score) * 0.5) + (float(segment_score) * 0.5)
    tempo_score = self._tempo_score(float(candidate.get("tempo", 0.0)), float(reference.get("tempo", 0.0)))
    key_distance = _key_distance(reference.get("key"), candidate.get("key"))
    # Linear key penalty: 6 semitone-steps of distance zero the key score.
    key_score = max(0.0, 1.0 - (key_distance / 6.0))
    duration = float(candidate.get("duration", 0.0))
    onset = float(candidate.get("onset_mean", 0.0))
    rms = float(candidate.get("rms_mean", 0.0))
    file_name = str(candidate.get("file_name", "") or "").lower()
    duration_score = self._duration_score(role, duration, file_name)
    naming_score = self._naming_score(role, file_name)
    spectral_score = self._spectral_role_score(role, candidate)

    # Hand-tuned per-role weight tables; each sums to 1.0.
    if role in ['kick', 'snare', 'hat']:
        base_score = (
            cosine_score * 0.18 +
            tempo_score * 0.10 +
            min(1.0, onset / 4.0) * 0.20 +
            duration_score * 0.22 +
            naming_score * 0.18 +
            spectral_score * 0.12
        )
    elif role == 'bass_loop':
        base_score = (
            cosine_score * 0.24 +
            tempo_score * 0.20 +
            key_score * 0.20 +
            duration_score * 0.16 +
            min(1.0, rms / 0.5) * 0.08 +
            spectral_score * 0.12
        )
    elif role in ['perc_loop', 'top_loop']:
        base_score = (
            cosine_score * 0.24 +
            tempo_score * 0.26 +
            key_score * 0.06 +
            duration_score * 0.16 +
            min(1.0, onset / 3.5) * 0.16 +
            spectral_score * 0.12
        )
    elif role == 'synth_loop':
        base_score = (
            cosine_score * 0.24 +
            tempo_score * 0.16 +
            key_score * 0.22 +
            duration_score * 0.16 +
            naming_score * 0.10 +
            spectral_score * 0.12
        )
    elif role == 'vocal_loop':
        base_score = (
            cosine_score * 0.26 +
            tempo_score * 0.20 +
            key_score * 0.06 +
            duration_score * 0.18 +
            naming_score * 0.18 +
            spectral_score * 0.12
        )
    elif role == 'crash_fx':
        base_score = (
            cosine_score * 0.14 +
            tempo_score * 0.06 +
            duration_score * 0.28 +
            naming_score * 0.32 +
            min(1.0, onset / 3.0) * 0.08 +
            spectral_score * 0.12
        )
    elif role == 'fill_fx':
        base_score = (
            cosine_score * 0.16 +
            tempo_score * 0.16 +
            duration_score * 0.22 +
            naming_score * 0.22 +
            min(1.0, onset / 3.0) * 0.12 +
            spectral_score * 0.12
        )
    elif role == 'snare_roll':
        base_score = (
            cosine_score * 0.14 +
            tempo_score * 0.12 +
            duration_score * 0.20 +
            naming_score * 0.28 +
            min(1.0, onset / 2.5) * 0.14 +
            spectral_score * 0.12
        )
    elif role == 'atmos_fx':
        base_score = (
            cosine_score * 0.28 +
            tempo_score * 0.06 +
            key_score * 0.16 +
            duration_score * 0.22 +
            naming_score * 0.16 +
            spectral_score * 0.12
        )
    elif role == 'vocal_shot':
        base_score = (
            cosine_score * 0.20 +
            tempo_score * 0.10 +
            key_score * 0.12 +
            duration_score * 0.20 +
            naming_score * 0.26 +
            spectral_score * 0.12
        )
    else:
        # Unknown roles fall back to a generic similarity/tempo/key mix.
        base_score = cosine_score * 0.5 + tempo_score * 0.3 + key_score * 0.2

    # catalog_score in [0, 1] maps to a 0.82x-1.06x multiplier.
    return float(base_score) * (0.82 + (0.24 * float(catalog_score)))
def _spectral_role_score(self, role: str, candidate: Dict[str, Any]) -> float:
    """Score a candidate by how well its spectral stats fit the role's signature.

    Four equally-weighted (0.25 each) band checks: spectral centroid,
    rolloff, RMS spread (one-shot vs loop proxy) and transient strength.
    Values inside the signature's range earn full credit; outside, the
    credit decays exponentially with distance to the nearest bound.
    """
    centroid = float(candidate.get("spectral_centroid", 0.0))
    rolloff = float(candidate.get("spectral_rolloff", 0.0))
    rms_std = float(candidate.get("rms_std", 0.0))
    onset_mean = float(candidate.get("onset_mean", 0.0))
    rms_mean = float(candidate.get("rms_mean", 0.0))

    # Relative RMS spread (std / mean), capped at 1; neutral 0.5 when silent.
    rms_spread = min(1.0, rms_std / max(0.01, rms_mean)) if rms_mean > 0 else 0.5
    transient_score = min(1.0, onset_mean / 3.0)

    sig = SPECTRAL_ROLE_SIGNATURES.get(role)
    if not sig:
        return 0.5  # no signature for this role: neutral score

    def _band_credit(value: float, bounds: Tuple[float, float], decay: float) -> float:
        # Full 0.25 inside [lo, hi]; exponential falloff outside.
        lo, hi = bounds
        if lo <= value <= hi:
            return 0.25
        dist = min(abs(value - lo), abs(value - hi))
        return 0.25 * math.exp(-dist / decay)

    score = _band_credit(centroid, sig.get('centroid_range', (0, 20000)), 2000)
    score += _band_credit(rolloff, sig.get('rolloff_range', (0, 20000)), 3000)
    score += _band_credit(rms_spread, sig.get('rms_spread', (0.0, 1.0)), 0.3)
    score += _band_credit(transient_score, sig.get('transient_score', (0.0, 1.0)), 0.3)

    return min(1.0, max(0.0, score))
min(1.0, onset_mean / 3.0) - - # Get expected signature for role - sig = SPECTRAL_ROLE_SIGNATURES.get(role) - if not sig: - return 0.5 - - score = 0.0 - - # Centroid match - centroid_min, centroid_max = sig.get('centroid_range', (0, 20000)) - if centroid_min <= centroid <= centroid_max: - score += 0.25 - else: - # Partial score for being close - dist = min(abs(centroid - centroid_min), abs(centroid - centroid_max)) - score += 0.25 * math.exp(-dist / 2000) - - # Rolloff match - rolloff_min, rolloff_max = sig.get('rolloff_range', (0, 20000)) - if rolloff_min <= rolloff <= rolloff_max: - score += 0.25 - else: - dist = min(abs(rolloff - rolloff_min), abs(rolloff - rolloff_max)) - score += 0.25 * math.exp(-dist / 3000) - - # RMS spread match (for one-shots vs loops) - spread_min, spread_max = sig.get('rms_spread', (0.0, 1.0)) - if spread_min <= rms_spread <= spread_max: - score += 0.25 - else: - dist = min(abs(rms_spread - spread_min), abs(rms_spread - spread_max)) - score += 0.25 * math.exp(-dist / 0.3) - - # Transient score match - trans_min, trans_max = sig.get('transient_score', (0.0, 1.0)) - if trans_min <= transient_score <= trans_max: - score += 0.25 - else: - dist = min(abs(transient_score - trans_min), abs(transient_score - trans_max)) - score += 0.25 * math.exp(-dist / 0.3) - - return min(1.0, max(0.0, score)) - - def _duration_score(self, role: str, duration: float, file_name: str) -> float: - """Improved duration scoring with better one-shot vs loop detection.""" - file_lower = file_name.lower() - - # One-shot roles: kick, snare/clap, hat - if role in ['kick', 'snare', 'hat']: - # Ideal one-shot duration: 0.1 - 1.5 seconds - is_explicit_loop = 'loop' in file_lower or 'looped' in file_lower - is_explicit_shot = 'shot' in file_lower or 'one shot' in file_lower or 'oneshot' in file_lower - - if is_explicit_shot and duration < 3.0: - return 1.0 - if is_explicit_loop: - return 0.35 - - # Duration-based scoring for one-shots - if duration < 0.1: - return 0.4 # Too 
short, probably artifact - if duration < 2.0: - # Sweet spot for one-shots - peak = 0.5 if role == 'kick' else (0.8 if role == 'hat' else 0.6) - score = math.exp(-abs(duration - peak) / 1.0) - return max(0.0, min(1.0, score)) - if duration < 4.0: - # Could be a roll or extended hit - return 0.5 if 'roll' in file_lower else 0.3 - return 0.2 # Too long for one-shot - - # Loop roles: bass, perc, top, synth, vocal - if role in ['bass_loop', 'perc_loop', 'top_loop', 'synth_loop', 'vocal_loop']: - is_explicit_loop = 'loop' in file_lower or 'looped' in file_lower - is_explicit_shot = 'shot' in file_lower or 'one shot' in file_lower or 'oneshot' in file_lower - - if is_explicit_shot: - return 0.25 # One-shot marked as loop role - - # Ideal loop duration: 2 - 16 seconds (typically 4 or 8 bars) - if duration < 0.5: - return 0.2 # Too short for a proper loop - if duration < 2.0: - # Short loop, acceptable but not ideal - base_score = duration / 2.0 - if is_explicit_loop: - base_score += 0.2 - return min(1.0, base_score) - if duration < 12.0: - # Sweet spot for loops (2-8 bars typically) - score = min(1.0, duration / 6.0) - if is_explicit_loop: - score = min(1.0, score + 0.15) - return score - if duration < 20.0: - # Longer loop, still acceptable - return 0.75 if is_explicit_loop else 0.6 - return 0.5 # Very long loop - - # FX roles - if role == 'crash_fx': - # Crashes: 0.5 - 4 seconds - if any(marker in file_lower for marker in ['loop', 'top', 'hat', 'snare']): - return 0.15 - if duration < 0.3: - return 0.3 - if duration < 5.0: - return math.exp(-abs(duration - 2.0) / 2.5) - return 0.4 - - if role in ['fill_fx', 'snare_roll']: - # Fills/rolls: 1 - 8 seconds - if duration < 0.5: - return 0.3 - if duration < 8.0: - return math.exp(-abs(duration - 4.0) / 3.0) - return 0.5 - - if role == 'atmos_fx': - # Atmos: longer, sustained sounds - if duration < 2.0: - return 0.4 - if duration < 30.0: - return min(1.0, duration / 12.0) - return 0.8 - - if role == 'vocal_shot': - # Vocal 
shots: short one-shots - if duration < 0.2: - return 0.5 - if duration < 2.0: - return math.exp(-abs(duration - 0.8) / 1.2) - if duration < 4.0: - return 0.4 - return 0.25 - - return 0.5 - - def _naming_score(self, role: str, file_name: str) -> float: - if role == 'kick': - if 'loop' in file_name: - return 0.45 - return 1.0 if 'kick' in file_name else 0.7 - if role == 'snare': - if 'roll' in file_name: - return 0.4 - if 'clap' in file_name or 'snare' in file_name: - return 1.0 - return 0.7 - if role == 'hat': - if 'loop' in file_name: - return 0.7 - if 'closed' in file_name or 'hat' in file_name: - return 1.0 - return 0.75 - if role == 'vocal_loop': - if 'vocal' in file_name or 'vox' in file_name: - return 1.0 - return 0.7 - if role == 'top_loop': - if 'top' in file_name or 'full drum' in file_name: - return 1.0 - if 'perc' in file_name: - return 0.58 - return 0.85 if 'loop' in file_name else 0.65 - if role in ['bass_loop', 'perc_loop', 'synth_loop']: - return 1.0 if 'loop' in file_name else 0.72 - if role == 'crash_fx': - if 'crash' in file_name: - return 1.0 - if 'impact' in file_name: - return 0.9 - if any(marker in file_name for marker in ['top loop', 'top loops', 'closed hat', 'open hat', 'snare', 'roll']): - return 0.2 - return 0.65 - if role == 'fill_fx': - if 'fill' in file_name: - return 1.0 - if 'tom' in file_name or 'roll' in file_name: - return 0.84 - return 0.62 - if role == 'snare_roll': - if 'roll' in file_name: - return 1.0 - if 'snare' in file_name or 'fill' in file_name: - return 0.82 - return 0.55 - if role == 'atmos_fx': - if 'atmos' in file_name: - return 1.0 - if 'drone' in file_name or 'noise' in file_name: - return 0.82 - return 0.64 - if role == 'vocal_shot': - if 'vocal' in file_name or 'importante' in file_name: - return 1.0 - if 'shot' in file_name: - return 0.88 - return 0.64 - return 0.8 - - def _candidate_path(self, item: Optional[Dict[str, Any]]) -> str: - if not isinstance(item, dict): - return "" - return str(item.get("path", "") 
or "").strip().lower() - - def _candidate_family(self, item: Optional[Dict[str, Any]]) -> str: - if not isinstance(item, dict): - return "" - - file_name = str(item.get("file_name", "") or Path(str(item.get("path", "") or "")).name).strip().lower() - stem = Path(file_name).stem.lower() - if not stem: - return "" - - markers = [ - " - kick", " - snare", " - clap", " - closed hat", " - open hat", " - hat", - " - bass loop", " - percussion loop", " - percussion", " - perc loop", - " - top loop", " - synth loop", " - vocal loop", " - vocal one shot", - " - fill", " - snareroll", " - snare roll", " - crash", " - atmos", - ] - for marker in markers: - if marker in stem: - return stem.split(marker, 1)[0].strip() - - if " - " in stem: - return " - ".join(part.strip() for part in stem.split(" - ")[:2] if part.strip()) - if "_" in stem: - return "_".join(stem.split("_")[:2]).strip("_") - - words = stem.split() - return " ".join(words[:2]) if words else stem - - def _remember_candidate(self, item: Optional[Dict[str, Any]]) -> None: - path_key = self._candidate_path(item) - family_key = self._candidate_family(item) - if path_key: - self._recent_paths.append(path_key) - if hasattr(self, '_generation_path_usage'): - self._generation_path_usage[path_key] += 1 - if family_key: - self._recent_families.append(family_key) - # Track usage count for progressive penalty - self._family_usage_count[family_key] = self._family_usage_count.get(family_key, 0) + 1 - if hasattr(self, '_generation_family_usage'): - self._generation_family_usage[family_key] += 1 - - def _get_family_penalty(self, family_key: str) -> float: - """Calculate progressive penalty for repeated families.""" - if not family_key: - return 1.0 - - if family_key in self._recent_families: - return 0.08 - - usage_count = self._family_usage_count.get(family_key, 0) - if usage_count == 0: - return 1.0 - if usage_count == 1: - return 0.45 - if usage_count == 2: - return 0.22 - if usage_count >= 3: - return 0.08 - - return 1.0 - - 
def _get_cross_generation_family_penalty(self, family_key: str) -> float: - """Penaliza familias usadas en generaciones previas de referencia.""" - if not family_key: - return 1.0 - usage_count = int(_cross_generation_reference_family_memory.get(family_key, 0) or 0) - if usage_count <= 0: - return 1.0 - if usage_count == 1: - return 0.55 - if usage_count == 2: - return 0.30 - if usage_count >= 3: - return 0.08 - return max(0.08, 1.0 - (usage_count * 0.18)) - - def _get_cross_generation_path_penalty(self, path_key: str) -> float: - """Penaliza paths usados en generaciones previas de referencia.""" - if not path_key: - return 1.0 - usage_count = int(_cross_generation_reference_path_memory.get(path_key, 0) or 0) - if usage_count <= 0: - return 1.0 - if usage_count == 1: - return 0.40 - if usage_count >= 2: - return 0.15 - return max(0.25, 1.0 - (usage_count * 0.20)) - - def _select_candidate(self, role: str, items: List[Dict[str, Any]], rng: random.Random, - section_kind: str = "", section_energy: float = 0.5) -> Optional[Dict[str, Any]]: - if not items: - return None - - pool_sizes = { - "kick": 16, - "snare": 16, - "hat": 18, - "bass_loop": 14, - "perc_loop": 16, - "top_loop": 14, - "synth_loop": 14, - "vocal_loop": 12, - "crash_fx": 10, - "fill_fx": 12, - "snare_roll": 10, - "atmos_fx": 10, - "vocal_shot": 12, - } - pool_size = min(pool_sizes.get(role, 10), len(items)) - candidates = list(items[:pool_size]) - - section_bonus = { - 'kick': {'intro': 0.04, 'verse': 0.08, 'build': 0.10, 'drop': 0.14, 'break': -0.06, 'outro': 0.02}, - 'snare': {'intro': -0.08, 'verse': 0.06, 'build': 0.10, 'drop': 0.12, 'break': 0.04, 'outro': -0.06}, - 'hat': {'intro': 0.06, 'verse': 0.08, 'build': 0.12, 'drop': 0.10, 'break': -0.04, 'outro': 0.02}, - 'bass_loop': {'intro': -0.10, 'verse': 0.08, 'build': 0.12, 'drop': 0.18, 'break': -0.08, 'outro': -0.04}, - 'perc_loop': {'intro': 0.02, 'verse': 0.08, 'build': 0.14, 'drop': 0.16, 'break': 0.04, 'outro': 0.00}, - 'top_loop': {'intro': 
0.04, 'verse': 0.08, 'build': 0.14, 'drop': 0.16, 'break': 0.02, 'outro': 0.00}, - 'synth_loop': {'intro': 0.04, 'verse': 0.06, 'build': 0.12, 'drop': 0.18, 'break': 0.10, 'outro': 0.02}, - 'vocal_loop': {'intro': -0.04, 'verse': 0.12, 'build': 0.08, 'drop': 0.14, 'break': 0.08, 'outro': -0.02}, - 'crash_fx': {'intro': 0.08, 'verse': 0.02, 'build': 0.14, 'drop': 0.08, 'break': -0.04, 'outro': 0.08}, - 'fill_fx': {'intro': 0.02, 'verse': 0.04, 'build': 0.16, 'drop': 0.10, 'break': 0.08, 'outro': 0.02}, - 'snare_roll': {'intro': -0.06, 'verse': 0.02, 'build': 0.22, 'drop': 0.12, 'break': 0.04, 'outro': -0.08}, - 'atmos_fx': {'intro': 0.20, 'verse': 0.04, 'build': 0.02, 'drop': -0.04, 'break': 0.20, 'outro': 0.16}, - 'vocal_shot': {'intro': -0.04, 'verse': 0.08, 'build': 0.10, 'drop': 0.14, 'break': 0.06, 'outro': -0.02}, - } - - weighted: List[Tuple[float, Dict[str, Any]]] = [] - - for index, item in enumerate(candidates): - score = max(0.001, float(item.get("score", 0.001))) - rank_penalty = max(0.30, 1.0 - (index * 0.055)) - - passes_validation, validation_mod, validation_reason = self._validate_role_requirement(role, item) - if not passes_validation: - continue - - score *= validation_mod - - path_key = self._candidate_path(item) - path_penalty = 0.12 if path_key in self._recent_paths else 1.0 - - family_key = self._candidate_family(item) - family_penalty = self._get_family_penalty(family_key) - cross_family_penalty = self._get_cross_generation_family_penalty(family_key) - cross_path_penalty = self._get_cross_generation_path_penalty(path_key) - - section_bonus_val = section_bonus.get(role.lower(), {}).get(section_kind.lower(), 0.0) - if section_kind.lower() in {'drop', 'build'} and section_energy > 0.7: - section_bonus_val *= 1.2 - elif section_kind.lower() in {'break', 'intro'} and section_energy < 0.4: - section_bonus_val *= 1.2 - - energy_mod = 1.0 - rms = float(item.get("rms_mean", 0.0) or 0.0) - if role.lower() in {"kick", "snare", "bass_loop"}: - if rms > 
0.08: - energy_mod = min(1.15, 1.0 + (rms - 0.08) * 2.0) - elif rms < 0.03 and section_kind.lower() not in {"intro", "break"}: - energy_mod = 0.85 - - role_randomness = 0.88 + (rng.random() * 0.24) - - weight = ( - (score ** 1.7) - * rank_penalty - * path_penalty - * family_penalty - * cross_family_penalty - * cross_path_penalty - * role_randomness - * energy_mod - ) - - if section_bonus_val > 0: - weight *= (1.0 + section_bonus_val) - elif section_bonus_val < 0: - weight *= (1.0 + section_bonus_val * 0.5) - - weighted.append((max(0.001, weight), item)) - - if not weighted: - weighted = [(max(0.001, float(item.get("score", 0.001))), item) for item in candidates] - - total = sum(weight for weight, _ in weighted) - if total <= 0: - return candidates[0] if candidates else None - - pivot = rng.random() * total - running = 0.0 - for weight, item in weighted: - running += weight - if pivot <= running: - return item - - return weighted[0][1] - - def _select_distinct_candidate( - self, - role: str, - items: List[Dict[str, Any]], - rng: random.Random, - used_paths: set, - used_families: set, - section_kind: str = "", - section_energy: float = 0.5, - ) -> Optional[Dict[str, Any]]: - if not items: - return None - - filtered = [ - item for item in items - if self._candidate_path(item) not in used_paths - ] - - family_filtered = [ - item for item in filtered - if self._candidate_family(item) not in used_families - ] - - pool = family_filtered if family_filtered else filtered if filtered else items - - selected = self._select_candidate(role, pool, rng, section_kind, section_energy) - selected_path = self._candidate_path(selected) - selected_family = self._candidate_family(selected) - - if selected_path: - used_paths.add(selected_path) - if selected_family: - used_families.add(selected_family) - - self._remember_candidate(selected) - return selected - - def reset_family_tracking(self) -> None: - """Reset family usage tracking for a new generation.""" - 
self._family_usage_count.clear() - self._recent_families.clear() - self._recent_paths.clear() - - def start_generation_tracking(self) -> None: - """Inicia tracking de paths/familias para una generación nueva.""" - self._generation_family_usage = defaultdict(int) - self._generation_path_usage = defaultdict(int) - - def end_generation_tracking(self) -> None: - """Actualiza memoria cross-generation de la ruta de referencia.""" - for key in list(_cross_generation_reference_family_memory.keys()): - _cross_generation_reference_family_memory[key] = max(0, _cross_generation_reference_family_memory[key] - 1) - for key in list(_cross_generation_reference_path_memory.keys()): - _cross_generation_reference_path_memory[key] = max(0, _cross_generation_reference_path_memory[key] - 1) - - for family, count in dict(getattr(self, '_generation_family_usage', {})).items(): - if family: - _cross_generation_reference_family_memory[family] += int(count) - for path_key, count in dict(getattr(self, '_generation_path_usage', {})).items(): - if path_key: - _cross_generation_reference_path_memory[path_key] += int(count) - - for key in list(_cross_generation_reference_family_memory.keys()): - if _cross_generation_reference_family_memory[key] <= 0: - del _cross_generation_reference_family_memory[key] - for key in list(_cross_generation_reference_path_memory.keys()): - if _cross_generation_reference_path_memory[key] <= 0: - del _cross_generation_reference_path_memory[key] - - if hasattr(self, '_generation_family_usage'): - delattr(self, '_generation_family_usage') - if hasattr(self, '_generation_path_usage'): - delattr(self, '_generation_path_usage') - - def reset_cross_generation_tracking(self) -> None: - """Resetea la memoria de diversidad entre generaciones para referencia.""" - _cross_generation_reference_family_memory.clear() - _cross_generation_reference_path_memory.clear() - - def reset_recent_sample_diversity_memory(self) -> None: - """Resetea la memoria de diversidad de samples 
recientes por rol.""" - global _recent_sample_diversity_memory - _recent_sample_diversity_memory.clear() - - def sync_recent_memory_from_selector(self) -> None: - """Sync recent sample diversity memory from sample_selector module.""" - global _recent_sample_diversity_memory - try: - from .sample_selector import _recent_sample_diversity_memory as selector_memory - for role, paths in selector_memory.items(): - if role not in _recent_sample_diversity_memory: - _recent_sample_diversity_memory[role] = [] - for path in paths: - if path not in _recent_sample_diversity_memory[role]: - _recent_sample_diversity_memory[role].append(path) - except ImportError: - pass - - def get_recent_sample_diversity_state(self) -> Dict[str, List[str]]: - """Get copy of recent sample diversity memory.""" - return {role: list(paths) for role, paths in _recent_sample_diversity_memory.items()} - - def match_assets(self, reference_path: str) -> Dict[str, Any]: - reference = self.analyze_reference(reference_path) - reference_sections = self.detect_reference_sections(reference_path) - segment_bank = self._build_reference_segment_bank(reference_path, reference, reference_sections) - assets = self._list_assets() - matches: Dict[str, List[Dict[str, Any]]] = {} - role_segments = { - role: self._select_role_reference_segments(role, reference, segment_bank) - for role in assets.keys() - } - rerank_limits = { - "kick": 14, - "snare": 14, - "hat": 16, - "bass_loop": 12, - "perc_loop": 14, - "top_loop": 12, - "synth_loop": 12, - "vocal_loop": 12, - "crash_fx": 10, - "fill_fx": 10, - "snare_roll": 10, - "atmos_fx": 8, - "vocal_shot": 10, - } - - for role, files in assets.items(): - analyses: List[Dict[str, Any]] = [] - vectors: List[List[float]] = [] - for file_path in files: - try: - analysis = self.analyze_file(str(file_path), duration_limit=64.0) - except Exception: - continue - analyses.append(analysis) - vectors.append(list(analysis.get("vector", []))) - - scores = 
self._cosine_scores(reference.get("vector", []), vectors) - role_matches: List[Dict[str, Any]] = [] - for analysis, cosine_score in zip(analyses, scores): - catalog_score = self._vector_store_role_score(role, analysis, reference) - preliminary_score = self._role_score( - role, - reference, - analysis, - float(cosine_score), - segment_score=0.0, - catalog_score=catalog_score, - ) - role_matches.append({ - "_analysis": analysis, - "_cosine": float(cosine_score), - "_catalog": float(catalog_score), - "_preliminary": float(preliminary_score), - }) - - role_matches.sort(key=lambda item: item["_preliminary"], reverse=True) - rerank_limit = min(int(rerank_limits.get(role, 10) or 10), len(role_matches)) - - role_section_features = self._section_detector._get_role_section_features(role, reference_sections, role_segments.get(role, [])) - - finalized_matches: List[Dict[str, Any]] = [] - for index, item in enumerate(role_matches): - analysis = item["_analysis"] - cosine_score = float(item["_cosine"]) - catalog_score = float(item["_catalog"]) - segment_score = 0.0 - character_bonus = 1.0 - final_score = float(item["_preliminary"]) - - if index < rerank_limit: - segment_score = self._role_segment_similarity(role, analysis, role_segments.get(role, [])) - final_score = self._role_score( - role, - reference, - analysis, - cosine_score, - segment_score=segment_score, - catalog_score=catalog_score, - ) - - if role_section_features: - character_bonus = self._section_detector._section_character_bonus( - role, role_section_features, analysis - ) - final_score = final_score * character_bonus - - finalized_matches.append({ - "path": analysis["path"], - "file_name": analysis["file_name"], - "tempo": analysis["tempo"], - "key": analysis["key"], - "duration": analysis["duration"], - "cosine": round(float(cosine_score), 6), - "segment_score": round(float(segment_score), 6), - "catalog_score": round(float(catalog_score), 6), - "character_bonus": round(float(character_bonus), 3), - "score": 
round(float(final_score), 6), - }) - - finalized_matches.sort(key=lambda item: item["score"], reverse=True) - matches[role] = finalized_matches - - # Build section energy profile for generator - section_energy_profile = [] - for section in reference_sections: - features = section.get('features', {}) - section_energy_profile.append({ - 'kind': section.get('kind', 'drop'), - 'energy_mean': features.get('energy_mean', features.get('energy', 0.5)), - 'energy_peak': features.get('energy_peak', 0.5), - 'energy_slope': features.get('energy_slope', 0.0), - 'spectral_centroid_mean': features.get('spectral_centroid_mean', features.get('brightness', 0.5)), - 'spectral_centroid_std': features.get('spectral_centroid_std', 0.0), - 'onset_rate': features.get('onset_rate', features.get('onset_density', 0.5)), - 'low_energy_ratio': features.get('low_energy_ratio', 0.0), - 'high_energy_ratio': features.get('high_energy_ratio', 0.0), - 'kind_confidence': section.get('kind_confidence', 0.5), - }) - - return { - "reference": reference, - "reference_sections": reference_sections, - "segment_bank_size": len(segment_bank), - "role_segments": { - role: [ - { - "start": segment.get("start"), - "end": segment.get("end"), - "kind": segment.get("kind"), - "window_seconds": segment.get("window_seconds"), - } - for segment in items - ] - for role, items in role_segments.items() - }, - "matches": matches, - "section_energy_profile": section_energy_profile, - "device": self.device_name, - } - - def _section_offsets(self, sections: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], float, float]]: - offsets: List[Tuple[Dict[str, Any], float, float]] = [] - position = 0.0 - for section in sections: - beats = float(section.get("beats", 0.0) or (float(section.get("bars", 8)) * 4.0)) - start = position - end = position + beats - offsets.append((section, start, end)) - position = end - return offsets - - def _section_energy(self, reference: Dict[str, Any], progress: float) -> float: - blocks = 
reference.get("blocks", []) - if not blocks: - return 0.5 - index = min(len(blocks) - 1, max(0, int(round(progress * (len(blocks) - 1))))) - return float(blocks[index].get("energy_norm", 0.5)) - - def _loop_step_beats(self, item: Optional[Dict[str, Any]], project_bpm: float, default_beats: float = 16.0) -> float: - if not item: - return default_beats - duration = float(item.get("duration", 0.0)) - source_tempo = float(item.get("tempo", 0.0)) - if duration <= 0: - return default_beats - if source_tempo > 0: - source_beats = duration * source_tempo / 60.0 - rounded = max(4.0, round(source_beats / 4.0) * 4.0) - return float(rounded) - estimated = duration * project_bpm / 60.0 - rounded = max(4.0, round(estimated / 4.0) * 4.0) - return float(rounded) - - def _detect_roles_for_segment(self, features: Dict[str, float], section_kind: str) -> List[str]: - """Detect appropriate roles for a segment based on its features and section type.""" - roles = [] - energy = features.get('energy', 0.5) - onset = features.get('onset_density', 0.5) - brightness = features.get('brightness', 0.5) - - # Drums are always present in non-intro/outro sections - if section_kind in ['drop', 'build', 'verse']: - roles.extend(['kick', 'snare', 'hat']) - - # Bass is present in high-energy sections - if section_kind in ['drop', 'build'] or energy > 0.5: - roles.append('bass_loop') - - # Percussion and top loops based on onset density - if onset > 0.4: - roles.extend(['perc_loop', 'top_loop']) - - # Synths in drops and high-brightness sections - if section_kind == 'drop' or (brightness > 0.5 and energy > 0.6): - roles.append('synth_loop') - - # Vocals in drops and verse sections - if section_kind in ['drop', 'verse']: - roles.extend(['vocal_loop', 'vocal_shot']) - - # FX based on section type - if section_kind == 'build': - roles.extend(['snare_roll', 'fill_fx', 'crash_fx']) - elif section_kind == 'break': - roles.extend(['atmos_fx', 'fill_fx']) - elif section_kind == 'intro': - 
roles.extend(['atmos_fx', 'crash_fx']) - elif section_kind == 'outro': - roles.extend(['atmos_fx', 'crash_fx']) - - return list(set(roles)) - - def _analyze_segment_roles(self, reference: Dict[str, Any], sections: List[Dict[str, Any]]) -> Dict[str, List[str]]: - """Analyze and return recommended roles for each section.""" - segment_roles: Dict[str, List[str]] = {} - - for i, section in enumerate(sections): - kind = str(section.get("kind", "drop")).lower() - - # Use features if available from automatic detection - features = section.get("features", { - 'energy': 0.5, - 'onset_density': 0.5, - 'brightness': 0.5, - }) - - # Estimate features from position if not available - if 'energy' not in features: - blocks = reference.get("blocks", []) - if blocks: - progress = i / max(1, len(sections) - 1) - idx = min(len(blocks) - 1, max(0, int(progress * (len(blocks) - 1)))) - features['energy'] = float(blocks[idx].get("energy_norm", 0.5)) - - roles = self._detect_roles_for_segment(features, kind) - segment_roles[f"section_{i}_{kind}"] = roles - - return segment_roles - - def detect_reference_sections(self, reference_path: str, min_section_seconds: float = 8.0) -> List[Dict[str, Any]]: - """Automatically detect sections from a reference track with richer feature extraction.""" - if librosa is None: - raise RuntimeError("librosa no está disponible") - - path = Path(reference_path) - y, sr = librosa.load(str(path), sr=22050, mono=True) - hop_length = 512 - n_fft = _adaptive_n_fft(len(y)) - - onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length) - rms = librosa.feature.rms(y=y, hop_length=hop_length)[0] - centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft=n_fft)[0] - - duration = float(librosa.get_duration(y=y, sr=sr)) - - sections = self._section_detector.detect_sections( - rms, onset_env, centroid, duration, min_section_seconds - ) - - tempo = float(librosa.feature.tempo(onset_envelope=onset_env, sr=sr, aggregate=np.median) or 128) - - if 
len(sections) < 2 and duration > min_section_seconds * 1.5: - mid = duration / 2 - energy_first_half = float(np.mean(rms[:int(len(rms)/2)])) if len(rms) > 0 else 0.5 - energy_second_half = float(np.mean(rms[int(len(rms)/2):])) if len(rms) > 1 else 0.5 - - if energy_first_half < energy_second_half * 0.8: - sections = [ - {'kind': 'intro', 'start': 0.0, 'end': mid * 0.4, 'duration': mid * 0.4, - 'bars': max(4, int(mid * 0.4 * tempo / 60 / 4)), 'features': {'energy': energy_first_half}}, - {'kind': 'build', 'start': mid * 0.4, 'end': mid, 'duration': mid * 0.6, - 'bars': max(4, int(mid * 0.6 * tempo / 60 / 4)), 'features': {'energy': (energy_first_half + energy_second_half) / 2}}, - {'kind': 'drop', 'start': mid, 'end': duration, 'duration': mid, - 'bars': max(4, int(mid * tempo / 60 / 4)), 'features': {'energy': energy_second_half}}, - ] - else: - sections = [ - {'kind': 'verse', 'start': 0.0, 'end': mid, 'duration': mid, - 'bars': max(4, int(mid * tempo / 60 / 4)), 'features': {'energy': energy_first_half}}, - {'kind': 'drop', 'start': mid, 'end': duration, 'duration': mid, - 'bars': max(4, int(mid * tempo / 60 / 4)), 'features': {'energy': energy_second_half}}, - ] - - prev_features = None - total_sections = len(sections) - for i, section in enumerate(sections): - sec_duration = section.get('duration', 8.0) - beats_per_second = tempo / 60.0 - beats = sec_duration * beats_per_second - bars = max(4, int(round(beats / 4.0))) - section['bars'] = bars - section['beats'] = bars * 4 - section['tempo'] = round(tempo, 1) - section['section_index'] = i - section['total_sections'] = total_sections - - start_time = float(section.get('start', 0.0)) - end_time = float(section.get('end', sec_duration)) - - # Compute richer section features inline (method was in wrong class) - duration_sec = end_time - start_time - frames_per_second = sr / hop_length - start_frame = int(start_time * frames_per_second) - end_frame = int(end_time * frames_per_second) - start_frame = max(0, 
min(start_frame, len(rms) - 1)) - end_frame = max(start_frame + 1, min(end_frame, len(rms))) - - section_rms = rms[start_frame:end_frame] if end_frame > start_frame else np.array([0.0]) - rms_max_global = float(np.max(rms)) if len(rms) > 0 else 0.01 - energy_mean = float(np.mean(section_rms)) if len(section_rms) > 0 else 0.0 - energy_peak = float(np.max(section_rms)) if len(section_rms) > 0 else 0.0 - energy_mean_norm = min(1.0, (energy_mean / max(rms_max_global, 0.001)) * 2.0) - energy_peak_norm = min(1.0, (energy_peak / max(rms_max_global, 0.001)) * 1.5) - - richer_features = { - 'energy_mean': round(energy_mean_norm, 3), - 'energy_peak': round(energy_peak_norm, 3), - 'energy_slope': 0.0, - 'spectral_centroid_mean': 0.5, - 'spectral_centroid_std': 0.0, - 'onset_rate': 0.5, - 'low_energy_ratio': 0.3, - 'high_energy_ratio': 0.3, - } - - if 'features' not in section: - section['features'] = {} - section['features'].update(richer_features) - - kind = str(section.get('kind', 'drop')).lower() - position_ratio = start_time / max(duration, 0.001) - section['features']['total_sections'] = total_sections - - # Simple confidence calculation inline - energy = section['features'].get('energy', 0.5) - onset_density = section['features'].get('onset_density', 0.5) - - # Basic confidence based on energy and position - if kind == 'intro' and position_ratio < 0.2: - confidence = 0.7 - elif kind == 'outro' and position_ratio > 0.8: - confidence = 0.7 - elif kind == 'drop' and energy > 0.6: - confidence = 0.75 - elif kind == 'build' and 0.3 < position_ratio < 0.7: - confidence = 0.65 - elif kind == 'break' and 0.4 < position_ratio < 0.8: - confidence = 0.6 - else: - confidence = 0.5 - - section['kind_confidence'] = confidence - alternatives = [] - if confidence < 0.55: - alternatives = ['drop', 'build', 'break'] - section['kind_alternatives'] = alternatives - - prev_features = section['features'] - - sections = self._validate_section_sequence(sections, duration, tempo) - - return 
sections - - def _validate_section_sequence(self, sections: List[Dict[str, Any]], - duration: float, tempo: float) -> List[Dict[str, Any]]: - """Validate and potentially correct section sequence for musical coherence.""" - if len(sections) < 2: - return sections - - result = [] - sequence_issues = [] - - VALID_TRANSITIONS = { - 'intro': {'verse', 'build', 'break', 'drop'}, - 'verse': {'build', 'drop', 'break', 'verse', 'outro'}, - 'build': {'drop', 'break', 'verse'}, - 'drop': {'break', 'verse', 'build', 'outro', 'drop'}, - 'break': {'build', 'drop', 'verse', 'outro'}, - 'outro': set(), - } - - PREFERRED_FIRST = {'intro', 'verse', 'build', 'break'} - PREFERRED_LAST = {'outro', 'drop', 'break'} - - for i, section in enumerate(sections): - kind = section.get('kind', 'drop') - confidence = section.get('kind_confidence', 0.5) - alternatives = section.get('kind_alternatives', []) - - section_copy = dict(section) - - if i == 0: - if kind not in PREFERRED_FIRST: - if confidence < 0.55 and alternatives: - for alt in alternatives: - if alt in PREFERRED_FIRST: - section_copy['kind'] = alt - section_copy['sequence_correction'] = 'first_section_adjusted' - section_copy['original_kind'] = kind - break - elif confidence < 0.45: - section_copy['sequence_warning'] = f'first_section_is_{kind}' - - if i == len(sections) - 1: - if kind not in PREFERRED_LAST: - if confidence < 0.55 and alternatives: - for alt in alternatives: - if alt in PREFERRED_LAST: - section_copy['kind'] = alt - section_copy['sequence_correction'] = 'last_section_adjusted' - section_copy['original_kind'] = kind - break - elif confidence < 0.45: - section_copy['sequence_warning'] = f'last_section_is_{kind}' - - if 0 < i < len(sections) - 1: - prev_kind = sections[i - 1].get('kind', 'drop') - next_kind = sections[i + 1].get('kind', 'drop') if i + 1 < len(sections) else None - - valid_prev = kind in VALID_TRANSITIONS.get(prev_kind, set()) - - if not valid_prev and confidence < 0.60: - transition_key = 
f'{prev_kind}_to_{kind}' - sequence_issues.append(transition_key) - - if alternatives: - for alt in alternatives: - if alt in VALID_TRANSITIONS.get(prev_kind, set()): - if next_kind is None or next_kind in VALID_TRANSITIONS.get(alt, set()): - section_copy['kind'] = alt - section_copy['sequence_correction'] = 'transition_fixed' - section_copy['original_kind'] = kind - section_copy['invalid_transition'] = transition_key - break - - if kind == 'build': - next_kind = sections[i + 1].get('kind', '') if i < len(sections) - 1 else None - if next_kind and next_kind not in ('drop', 'break', 'verse'): - next_confidence = sections[i + 1].get('kind_confidence', 0.5) - if next_confidence < 0.60: - section_copy['build_transition_warning'] = f'build_followed_by_{next_kind}' - - if kind == 'drop': - features = section.get('features', {}) - energy = features.get('energy', 0.5) - if energy < 0.50: - section_copy['drop_energy_warning'] = f'drop_has_low_energy_{energy:.2f}' - if confidence < 0.55 and alternatives: - for alt in alternatives: - if alt in {'verse', 'build'}: - section_copy['kind'] = alt - section_copy['sequence_correction'] = 'low_energy_drop_reclassified' - section_copy['original_kind'] = 'drop' - break - - result.append(section_copy) - - if sequence_issues: - result[0]['sequence_issues'] = sequence_issues[:5] - - return result - - def _get_section_variant(self, section_kind: str, section_name: str = "") -> str: - """ - Determina la variante apropiada para una sección. - - Retorna un string como 'sparse', 'dense', 'full', etc. 
- """ - kind_lower = section_kind.lower() - name_lower = section_name.lower() - - # Detectar variantes especiales por nombre - if 'peak' in name_lower or 'main' in name_lower: - return 'peak' - if 'minimal' in name_lower: - return 'minimal' - if 'atmos' in name_lower: - return 'atmospheric' - - # Usar defaults por tipo - return SECTION_VARIANTS.get(kind_lower, ['standard'])[0] - - def _select_variant_samples(self, - base_samples: List[Any], - role: str, - section_variant: str, - target_key: str = None, - target_bpm: float = None) -> List[Any]: - """ - Selecciona samples apropiados para una variante de sección. - - Filtra y reordena base_samples según la variante: - - 'sparse': prefiere samples más ligeros/simples - - 'dense': prefiere samples más complejos - - 'full': usa samples principales - - 'minimal': usa samples más sutiles - """ - if not base_samples: - return base_samples - - # Por defecto, retornar sin cambios - if section_variant == 'standard': - return base_samples - - variant_samples = [] - - for sample in base_samples: - # Get sample name from the match dict - if isinstance(sample, dict): - sample_name = sample.get('file_name', '') - else: - sample_name = str(sample) - - name_lower = sample_name.lower() - - # Variant sparse/minimal: buscar keywords sutiles - if section_variant in ['sparse', 'minimal', 'atmospheric', 'fading']: - if any(kw in name_lower for kw in ['light', 'soft', 'subtle', 'simple', 'minimal', 'clean', 'thin']): - variant_samples.insert(0, sample) # Prioridad alta - elif any(kw in name_lower for kw in ['heavy', 'full', 'busy', 'complex', 'big', 'thick']): - continue # Skip para variantes sutiles - else: - variant_samples.append(sample) - - # Variant dense/full/peak: buscar keywords ricos - elif section_variant in ['dense', 'full', 'peak', 'building']: - if any(kw in name_lower for kw in ['full', 'big', 'rich', 'heavy', 'peak', 'main', 'thick']): - variant_samples.insert(0, sample) # Prioridad alta - elif any(kw in name_lower for kw in 
['minimal', 'subtle', 'light', 'thin']): - continue # Skip para variantes ricas - else: - variant_samples.append(sample) - - else: - variant_samples.append(sample) - - # Si no quedan samples después del filtro, usar originals - return variant_samples if variant_samples else base_samples - - def _get_variant_samples_for_section(self, - base_samples: List[Any], - role: str, - section_kind: str, - section_name: str, - target_key: str = None, - target_bpm: float = None, - max_variants: int = 3) -> Dict[str, List[Any]]: - """ - Selecciona samples DIFERENTES para diferentes secciones de un mismo rol. - - Retorna un dict mapping section_key -> list of samples. - - Para roles variante (perc, top_loop, etc.), esto retorna samples distintos - para intro/verse/build/drop/break/outro cuando es posible. - """ - # Roles que pueden tener variación real - variant_roles = ['perc', 'perc_alt', 'top_loop', 'vocal_shot', 'synth_peak', 'atmos'] - - if role not in variant_roles or not base_samples or len(base_samples) < 3: - # No hay suficiente pool para variación - return {'all': base_samples} - - section_map = {} - - # Variantes por tipo de sección - section_types = { - 'intro': ['minimal', 'sparse'], - 'verse': ['standard', 'light'], - 'build': ['building', 'adding'], - 'drop': ['full', 'peak', 'rich'], - 'break': ['sparse', 'atmospheric'], - 'outro': ['fading', 'minimal'] - } - - # Para cada sección, seleccionar samples con preferencias diferentes - section_key = f"{section_kind}_{section_name}" - - # Determinar preferencia para esta sección - variants = section_types.get(section_kind.lower(), ['standard']) - preference = variants[0] if variants else 'standard' - - # Filtrar samples según preferencia - variant_samples = [] - remaining_samples = list(base_samples) - - for sample in remaining_samples: - # Get sample name from the match dict - if isinstance(sample, dict): - sample_name = sample.get('file_name', '') - else: - sample_name = str(sample) - - name_lower = 
sample_name.lower() - - # Para sparse/minimal: buscar keywords ligeros - if preference in ['minimal', 'sparse', 'atmospheric']: - if any(kw in name_lower for kw in ['light', 'soft', 'subtle', 'minimal', 'clean', 'atmos']): - variant_samples.append(sample) - elif any(kw in name_lower for kw in ['heavy', 'hard', 'full', 'big']): - continue - - # Para full/peak: buscar keywords ricos - elif preference in ['full', 'peak', 'rich', 'building']: - if any(kw in name_lower for kw in ['full', 'big', 'rich', 'heavy', 'peak', 'main']): - variant_samples.append(sample) - elif any(kw in name_lower for kw in ['minimal', 'subtle']): - continue - - else: - variant_samples.append(sample) - - # Si no encontramos suficientes, usar del pool original - if len(variant_samples) < 2: - variant_samples = base_samples[:max_variants] - - section_map[section_key] = variant_samples[:max_variants] - - return section_map - - def build_arrangement_plan(self, reference_path: str, sections: List[Dict[str, Any]], - project_bpm: float, project_key: str, - variant_seed: Optional[int] = None) -> Dict[str, Any]: - # Reset family tracking for new generation - self.reset_family_tracking() - - result = self.match_assets(reference_path) - reference = result["reference"] - matches = result["matches"] - - # Auto-detect sections if not provided or enhance existing ones - if not sections: - sections = self.detect_reference_sections(reference_path) - - offsets = self._section_offsets(sections) - rng = random.Random(variant_seed if variant_seed is not None else random.SystemRandom().randint(1, 10**9)) - - # Analyze roles per segment - segment_roles = self._analyze_segment_roles(reference, sections) - - used_paths: set = set() - used_families: set = set() - selection_order = [ - "kick", - "snare", - "hat", - "bass_loop", - "perc_loop", - "top_loop", - "synth_loop", - "vocal_loop", - "crash_fx", - "fill_fx", - "snare_roll", - "atmos_fx", - "vocal_shot", - ] - selected: Dict[str, Optional[Dict[str, Any]]] = {} - for 
role in selection_order: - selected[role] = self._select_distinct_candidate(role, matches.get(role, []), rng, used_paths, used_families) - - perc_candidates = [ - item for item in matches.get("perc_loop", []) - if self._candidate_path(item) != self._candidate_path(selected.get("perc_loop")) - ] - perc_alt = self._select_distinct_candidate("perc_loop", perc_candidates, rng, used_paths, used_families) if perc_candidates else None - synth_candidates = [ - item for item in matches.get("synth_loop", []) - if self._candidate_path(item) != self._candidate_path(selected.get("synth_loop")) - ] - synth_alt = self._select_distinct_candidate("synth_loop", synth_candidates, rng, used_paths, used_families) if synth_candidates else None - vocal_candidates = [ - item for item in matches.get("vocal_loop", []) - if self._candidate_path(item) != self._candidate_path(selected.get("vocal_loop")) - ] - vocal_alt = self._select_distinct_candidate("vocal_loop", vocal_candidates, rng, used_paths, used_families) if vocal_candidates else None - - def add_range(target: List[Tuple[float, Dict]], start: float, end: float, step: float, offset: float = 0.0, sample: Dict = None): - if sample is None: - return - cursor = start + offset - while cursor < end - 0.01: - target.append((round(float(cursor), 3), sample)) - cursor += step - - def add_hit(target: List[Tuple[float, Dict]], position: float, sample: Dict = None): - if position >= 0.0 and sample is not None: - target.append((round(float(position), 3), sample)) - - kick_positions: List[Tuple[float, Dict]] = [] - snare_positions: List[Tuple[float, Dict]] = [] - hat_positions: List[Tuple[float, Dict]] = [] - bass_positions: List[Tuple[float, Dict]] = [] - perc_positions: List[Tuple[float, Dict]] = [] - perc_alt_positions: List[Tuple[float, Dict]] = [] - top_loop_positions: List[Tuple[float, Dict]] = [] - synth_positions: List[Tuple[float, Dict]] = [] - synth_peak_positions: List[Tuple[float, Dict]] = [] - vocal_positions: List[Tuple[float, Dict]] 
= [] - vocal_build_positions: List[Tuple[float, Dict]] = [] - vocal_peak_positions: List[Tuple[float, Dict]] = [] - crash_positions: List[Tuple[float, Dict]] = [] - fill_positions: List[Tuple[float, Dict]] = [] - snare_roll_positions: List[Tuple[float, Dict]] = [] - atmos_positions: List[Tuple[float, Dict]] = [] - vocal_shot_positions: List[Tuple[float, Dict]] = [] - - bass_step = self._loop_step_beats(selected.get("bass_loop"), project_bpm, 16.0) - perc_step = self._loop_step_beats(selected.get("perc_loop"), project_bpm, 16.0) - perc_alt_step = self._loop_step_beats(perc_alt, project_bpm, 8.0) - top_loop_step = self._loop_step_beats(selected.get("top_loop"), project_bpm, 8.0) - synth_step = self._loop_step_beats(selected.get("synth_loop"), project_bpm, 16.0) - vocal_step = self._loop_step_beats(selected.get("vocal_loop"), project_bpm, 8.0) - vocal_alt_step = self._loop_step_beats(vocal_alt, project_bpm, 8.0) - synth_alt_step = self._loop_step_beats(synth_alt, project_bpm, 8.0) - atmos_step = self._loop_step_beats(selected.get("atmos_fx"), project_bpm, 16.0) - - # Store section-specific samples for roles eligible for variation - section_samples: Dict[int, Dict[str, Optional[Dict[str, Any]]]] = {} - - for index, (section, start, end) in enumerate(offsets): - kind = str(section.get("kind", "drop")).lower() - section_name = str(section.get("name", "")).lower() - midpoint = (start + end) / 2.0 - progress = midpoint / max(1.0, offsets[-1][2]) - energy = self._section_energy(reference, progress) - is_peak = "peak" in section_name or energy > 0.82 - is_vocal = "vocal" in section_name - span = max(4.0, end - start) - has_next_section = index < len(offsets) - 1 - next_section = offsets[index + 1][0] if has_next_section else {} - next_kind = str(next_section.get("kind", "")).lower() - next_name = str(next_section.get("name", "")).lower() - transition_into_drop = next_kind == "drop" or "drop" in next_name or "peak" in next_name - transition_is_vocal = "vocal" in next_name - 
tail_hit = max(start, end - min(4.0, span / 2.0)) - roll_start = max(start, end - min(8.0, span)) - - # Apply section variation for eligible roles - section_variant = self._get_section_variant(kind, section.get('name', '')) - section_samples[index] = {} - - # Map roles to their match lists and global selections - role_match_map = { - 'perc': ('perc_loop', matches.get('perc_loop', []), selected.get('perc_loop')), - 'perc_alt': ('perc_loop', matches.get('perc_loop', []), perc_alt), - 'top_loop': ('top_loop', matches.get('top_loop', []), selected.get('top_loop')), - 'vocal_shot': ('vocal_shot', matches.get('vocal_shot', []), selected.get('vocal_shot')), - 'synth_peak': ('synth_loop', matches.get('synth_loop', []), synth_alt), - 'atmos': ('atmos_fx', matches.get('atmos_fx', []), selected.get('atmos_fx')), - } - - for var_role, (match_role, match_list, fallback_sample) in role_match_map.items(): - if var_role in SECTION_VARIATION_ROLES and match_list and section_variant != 'standard': - # Apply variant filtering with section-specific samples - section_samples_map = self._get_variant_samples_for_section( - match_list, - var_role, - kind, - section.get('name', ''), - target_key=project_key, - target_bpm=project_bpm - ) - - # Get section-specific samples for this role - section_key = f"{kind}_{section.get('name', '')}" - specific_samples = section_samples_map.get(section_key, match_list) - - # Use specific_samples for selection - samples_to_use = specific_samples if specific_samples else match_list - - if samples_to_use and samples_to_use != match_list: - # Select from section-specific samples, avoiding already used paths - section_used_paths = used_paths.copy() - section_sample = self._select_distinct_candidate( - match_role, - samples_to_use, - rng, - section_used_paths, - used_families - ) - - if section_sample: - # Get the actual file path for logging - sample_path = section_sample.get('file_path', section_sample.get('file_name', 'unknown')) - 
logger.debug("SECTION_VARIANT_REAL: role '%s' using %d specific samples for section '%s' (vs %d base) - selected: %s", - var_role, len(samples_to_use), section.get('name'), len(match_list), sample_path) - section_samples[index][var_role] = section_sample - else: - # Fallback to global selection - section_samples[index][var_role] = fallback_sample - else: - # No filtering applied or no samples after filter, use global - section_samples[index][var_role] = fallback_sample - else: - # Not eligible for variation or no variant, use global - section_samples[index][var_role] = fallback_sample - - # Helper to get the right sample for a role in this section - def get_sample(role: str, fallback: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]: - """Get section-specific sample if available, otherwise fallback.""" - return section_samples[index].get(role, fallback) - - atmos_sample = get_sample('atmos', selected.get("atmos_fx")) - if atmos_sample and kind in {"intro", "break", "outro"}: - add_range(atmos_positions, start, end, max(8.0, atmos_step), sample=atmos_sample) - elif atmos_sample and is_vocal and span >= 8.0: - add_hit(atmos_positions, max(start, end - 8.0), sample=atmos_sample) - - if kind == 'intro': - add_range(kick_positions, start, end, 2.0 if energy < 0.55 else 1.0, sample=selected.get("kick")) - add_range(hat_positions, start, end, 1.0, 0.5, sample=selected.get("hat")) - if selected.get("top_loop") and energy > 0.5: - add_range(top_loop_positions, start + min(4.0, span / 2.0), end, top_loop_step, 0.0, sample=get_sample('top_loop', selected.get("top_loop"))) - elif kind == 'break': - add_range(kick_positions, start, end, 4.0, sample=selected.get("kick")) - add_range(snare_positions, start + 3.0, end, 4.0, sample=selected.get("snare")) - if selected.get("perc_loop"): - perc_sample = get_sample('perc_alt', perc_alt) if perc_alt else get_sample('perc', selected.get("perc_loop")) - add_range(perc_alt_positions if perc_alt else perc_positions, start, end, 
perc_alt_step if perc_alt else perc_step, sample=perc_sample) - if vocal_alt and (is_vocal or energy > 0.6): - add_range(vocal_build_positions, start + max(0.0, span - 8.0), end, vocal_alt_step, sample=vocal_alt) - if selected.get("fill_fx") and has_next_section: - add_hit(fill_positions, tail_hit, sample=selected.get("fill_fx")) - if selected.get("snare_roll") and has_next_section: - add_hit(snare_roll_positions, roll_start, sample=selected.get("snare_roll")) - elif kind == 'build': - add_range(kick_positions, start, end, 1.0, sample=selected.get("kick")) - add_range(snare_positions, start + 1.0, end, 2.0, sample=selected.get("snare")) - add_range(hat_positions, start, end, 0.5, 0.5, sample=selected.get("hat")) - if selected.get("bass_loop"): - add_range(bass_positions, start, end, bass_step, sample=selected.get("bass_loop")) - if selected.get("perc_loop"): - add_range(perc_positions, start, end, perc_step, sample=get_sample('perc', selected.get("perc_loop"))) - if selected.get("top_loop"): - add_range(top_loop_positions, start + 4.0, end, top_loop_step, sample=get_sample('top_loop', selected.get("top_loop"))) - if selected.get("vocal_loop") and is_vocal: - add_range(vocal_positions, start, end, vocal_step, sample=selected.get("vocal_loop")) - if vocal_alt and (is_vocal or energy > 0.58): - add_range(vocal_build_positions, start, end, vocal_alt_step, 0.0, sample=vocal_alt) - if selected.get("synth_loop") and energy > 0.62: - add_range(synth_positions, max(start, end - max(8.0, synth_step)), end, synth_step, sample=selected.get("synth_loop")) - if selected.get("snare_roll"): - add_hit(snare_roll_positions, roll_start, sample=selected.get("snare_roll")) - if selected.get("fill_fx"): - add_hit(fill_positions, tail_hit, sample=selected.get("fill_fx")) - if transition_into_drop and selected.get("crash_fx"): - add_hit(crash_positions, end, sample=selected.get("crash_fx")) - else: - add_range(kick_positions, start, end, 1.0, sample=selected.get("kick")) - 
add_range(snare_positions, start + 1.0, end, 2.0, sample=selected.get("snare")) - add_range(hat_positions, start, end, 0.5, 0.5, sample=selected.get("hat")) - if selected.get("bass_loop"): - add_range(bass_positions, start, end, bass_step, sample=selected.get("bass_loop")) - if selected.get("perc_loop"): - add_range(perc_positions, start, end, perc_step, sample=get_sample('perc', selected.get("perc_loop"))) - if selected.get("top_loop"): - add_range(top_loop_positions, start, end, top_loop_step, sample=get_sample('top_loop', selected.get("top_loop"))) - if perc_alt and ("peak" in str(section.get("name", "")).lower() or energy > 0.82): - add_range(perc_alt_positions, start, end, perc_alt_step, sample=get_sample('perc_alt', perc_alt)) - if selected.get("synth_loop") and ("drop b" in section_name or is_peak or kind == 'drop'): - add_range(synth_positions, start, end, synth_step, sample=selected.get("synth_loop")) - if synth_alt and is_peak: - add_range(synth_peak_positions, start + min(4.0, span / 4.0), end, synth_alt_step, sample=get_sample('synth_peak', synth_alt)) - if selected.get("vocal_loop") and ("drop b" in section_name or is_peak): - add_range(vocal_positions, start + 4.0, end, vocal_step, sample=selected.get("vocal_loop")) - if vocal_alt and is_peak: - add_range(vocal_peak_positions, start, end, vocal_alt_step, sample=vocal_alt) - if selected.get("crash_fx") and index > 0: - add_hit(crash_positions, start, sample=selected.get("crash_fx")) - if selected.get("fill_fx") and has_next_section and next_kind != "outro": - add_hit(fill_positions, tail_hit, sample=selected.get("fill_fx")) - - vocal_shot_sample = get_sample('vocal_shot', selected.get("vocal_shot")) - if vocal_shot_sample and (is_peak or transition_is_vocal): - add_hit(vocal_shot_positions, min(end - 1.0, start + 4.0), sample=vocal_shot_sample) - if span >= 16.0: - add_hit(vocal_shot_positions, min(end - 1.0, start + span / 2.0), sample=vocal_shot_sample) - - layers: List[Dict[str, Any]] = [] - - def 
add_layer(name: str, asset: Optional[Dict[str, Any]], positions: List[Tuple[float, Dict]], - color: int, volume: float): - """Add one or more layers for positions grouped by sample.""" - if not positions: - return - - # Group positions by sample - positions_by_sample: Dict[str, List[float]] = {} - sample_info: Dict[str, Dict[str, Any]] = {} - - for pos, sample in positions: - if sample is None: - continue - sample_path = sample.get("path", "") - if sample_path not in positions_by_sample: - positions_by_sample[sample_path] = [] - sample_info[sample_path] = sample - positions_by_sample[sample_path].append(pos) - - # If no asset provided but positions exist, use the first sample - if asset is None and positions_by_sample: - first_sample_path = next(iter(positions_by_sample)) - asset = sample_info[first_sample_path] - - # If all positions use the same sample (or asset is provided), create single layer - if asset and (len(positions_by_sample) == 1 or asset.get("path") in positions_by_sample): - asset_positions = positions_by_sample.get(asset.get("path", ""), [p for p, _ in positions]) - if asset_positions: - adj_vol = volume - rms = asset.get("rms_energy", 0.0) - if rms > 0.0: - adj_vol = min(1.0, volume * ((0.2 / rms) ** 0.5)) - - layers.append({ - "name": name, - "file_path": asset["path"], - "positions": sorted(set(asset_positions)), - "color": color, - "volume": round(adj_vol, 3), - "source": asset.get("file_name", ""), - }) - else: - # Multiple samples - create layers with variant names - for i, (sample_path, pos_list) in enumerate(positions_by_sample.items()): - sample = sample_info[sample_path] - variant_name = sample.get("file_name", "") - - adj_vol = volume - rms = sample.get("rms_energy", 0.0) - if rms > 0.0: - adj_vol = min(1.0, volume * ((0.2 / rms) ** 0.5)) - - # Create variant suffix based on sample characteristics - if i > 0: - layer_name = f"{name} ({variant_name[:20]})" - else: - layer_name = name - - layers.append({ - "name": layer_name, - "file_path": 
sample_path, - "positions": sorted(set(pos_list)), - "color": color, - "volume": round(adj_vol, 3), - "source": variant_name, - }) - - add_layer("AUDIO KICK", selected.get("kick"), kick_positions, 10, 0.86) - add_layer("AUDIO CLAP", selected.get("snare"), snare_positions, 45, 0.72) - add_layer("AUDIO HAT", selected.get("hat"), hat_positions, 5, 0.58) - add_layer("AUDIO BASS LOOP", selected.get("bass_loop"), bass_positions, 30, 0.76) - add_layer("AUDIO PERC MAIN", selected.get("perc_loop"), perc_positions, 20, 0.68) - add_layer("AUDIO PERC ALT", perc_alt, perc_alt_positions, 22, 0.62) - add_layer("AUDIO TOP LOOP", selected.get("top_loop") or perc_alt or selected.get("perc_loop"), top_loop_positions, 24, 0.52) - add_layer("AUDIO SYNTH LOOP", selected.get("synth_loop"), synth_positions, 50, 0.52) - add_layer("AUDIO SYNTH PEAK", synth_alt or selected.get("synth_loop"), synth_peak_positions, 52, 0.48) - add_layer("AUDIO VOCAL LOOP", selected.get("vocal_loop"), vocal_positions, 40, 0.6) - add_layer("AUDIO VOCAL BUILD", vocal_alt or selected.get("vocal_loop"), vocal_build_positions, 42, 0.54) - add_layer("AUDIO VOCAL PEAK", vocal_alt or selected.get("vocal_loop"), vocal_peak_positions, 43, 0.58) - add_layer("AUDIO CRASH FX", selected.get("crash_fx"), crash_positions, 26, 0.5) - add_layer("AUDIO TRANSITION FILL", selected.get("fill_fx") or selected.get("snare_roll"), fill_positions, 28, 0.56) - add_layer("AUDIO SNARE ROLL", selected.get("snare_roll"), snare_roll_positions, 27, 0.54) - add_layer("AUDIO ATMOS", selected.get("atmos_fx"), atmos_positions, 54, 0.44) - add_layer("AUDIO VOCAL SHOT", selected.get("vocal_shot"), vocal_shot_positions, 41, 0.52) - - # Compute remake quality metrics - remake_quality = self._compute_remake_quality_metrics( - sections, selected, sections - ) - - # Build section energy profile for generator - section_energy_profile = [] - for section in sections: - features = section.get('features', {}) - section_energy_profile.append({ - 'kind': 
section.get('kind', 'drop'), - 'energy_mean': features.get('energy_mean', features.get('energy', 0.5)), - 'energy_peak': features.get('energy_peak', 0.5), - 'energy_slope': features.get('energy_slope', 0.0), - 'spectral_centroid_mean': features.get('spectral_centroid_mean', features.get('brightness', 0.5)), - 'spectral_centroid_std': features.get('spectral_centroid_std', 0.0), - 'onset_rate': features.get('onset_rate', features.get('onset_density', 0.5)), - 'low_energy_ratio': features.get('low_energy_ratio', 0.0), - 'high_energy_ratio': features.get('high_energy_ratio', 0.0), - 'kind_confidence': section.get('kind_confidence', 0.5), - }) - - return { - "reference": { - "path": reference.get("path"), - "file_name": reference.get("file_name"), - "tempo": reference.get("tempo"), - "key": reference.get("key") or project_key, - "device": self.device_name, - "variant_seed": variant_seed, - }, - "sections": sections, - "segment_roles": segment_roles, - "layers": layers, - "matches": selected, - "section_samples": section_samples, - "section_energy_profile": section_energy_profile, - "remake_quality": remake_quality, - } - - def _compute_remake_quality_metrics( - self, - sections: List[Dict[str, Any]], - selected: Dict[str, Optional[Dict[str, Any]]], - reference_sections: List[Dict[str, Any]] - ) -> Dict[str, Any]: - """ - Compute per-section quality scores for how well selected samples match reference character. - - Metrics included: - - Energy profile similarity - - Spectral characteristic similarity - - Rhythmic density comparison - - Low-end presence matching - - High-end brightness matching - - Uses already-computed data - no new librosa calls. 
- """ - section_scores = [] - - energy_profile_scores = [] - spectral_similarity_scores = [] - rhythmic_density_scores = [] - low_end_presence_scores = [] - high_end_brightness_scores = [] - - for i, section in enumerate(sections): - kind = str(section.get('kind', 'drop')).lower() - features = section.get('features', {}) - section_match_score = 0.5 - weak_roles = [] - - ref_energy_mean = features.get('energy_mean', features.get('energy', 0.5)) - _ = features.get('energy_peak', ref_energy_mean) - ref_energy_slope = features.get('energy_slope', 0.0) - ref_onset_rate = features.get('onset_rate', features.get('onset_density', 0.5)) - ref_low_ratio = features.get('low_energy_ratio', 0.0) - ref_high_ratio = features.get('high_energy_ratio', 0.0) - ref_spectral_centroid = features.get('spectral_centroid_mean', features.get('brightness', 0.5)) - ref_spectral_std = features.get('spectral_centroid_std', 0.0) - - energy_profile_score = 0.5 - spectral_similarity_score = 0.5 - rhythmic_density_score = 0.5 - low_end_presence_score = 0.5 - high_end_brightness_score = 0.5 - - selected_samples_energy = [] - selected_samples_centroid = [] - selected_samples_onset = [] - selected_samples_low_energy = 0.0 - selected_samples_high_energy = 0.0 - - for role in ['kick', 'snare', 'hat', 'bass_loop', 'perc_loop', 'top_loop', 'synth_loop', 'vocal_loop', 'atmos_fx']: - sample = selected.get(role) - if sample: - rms = float(sample.get('rms_mean', sample.get('rms_energy', 0.5)) or 0.5) - centroid = float(sample.get('spectral_centroid', 5000) or 5000) - onset = float(sample.get('onset_mean', sample.get('onset_rate', 3)) or 3) - - selected_samples_energy.append(rms) - selected_samples_centroid.append(centroid) - selected_samples_onset.append(onset) - - if centroid < 300: - selected_samples_low_energy += rms - if centroid > 4000: - selected_samples_high_energy += rms - - if selected_samples_energy: - avg_energy = sum(selected_samples_energy) / len(selected_samples_energy) - energy_diff = 
abs(avg_energy - ref_energy_mean) - energy_profile_score = max(0.0, 1.0 - energy_diff * 2.0) - - if ref_energy_slope > 0.1: - build_roles = ['snare_roll', 'fill_fx', 'hat'] - build_energy = sum( - float(selected.get(r, {}).get('rms_mean', 0) or 0) - for r in build_roles if selected.get(r) - ) - if build_energy > 0.3: - energy_profile_score = min(1.0, energy_profile_score + 0.15) - - if selected_samples_centroid: - avg_centroid_norm = sum(selected_samples_centroid) / len(selected_samples_centroid) / 10000.0 - ref_centroid_norm = ref_spectral_centroid - centroid_diff = abs(avg_centroid_norm - ref_centroid_norm) - spectral_similarity_score = max(0.0, 1.0 - centroid_diff) - - if ref_spectral_std > 0.3: - centroid_variance = 0.0 - if len(selected_samples_centroid) > 1: - centroid_variance = float(np.std(selected_samples_centroid)) / 10000.0 - if centroid_variance > 0.1: - spectral_similarity_score = min(1.0, spectral_similarity_score + 0.1) - - if selected_samples_onset: - avg_onset_norm = sum(selected_samples_onset) / len(selected_samples_onset) / 10.0 - ref_onset_norm = ref_onset_rate - onset_diff = abs(avg_onset_norm - ref_onset_norm) - rhythmic_density_score = max(0.0, 1.0 - onset_diff) - - if ref_onset_rate > 0.5: - perc_onset = float(selected.get('perc_loop', {}).get('onset_mean', 0) or 0) - top_onset = float(selected.get('top_loop', {}).get('onset_mean', 0) or 0) - hat_onset = float(selected.get('hat', {}).get('onset_mean', 0) or 0) - if perc_onset > 3 or top_onset > 3 or hat_onset > 3: - rhythmic_density_score = min(1.0, rhythmic_density_score + 0.15) - - bass_match = selected.get('bass_loop') - kick_match = selected.get('kick') - if bass_match or kick_match: - bass_centroid = float(bass_match.get('spectral_centroid', 500) or 500) if bass_match else 500 - kick_centroid = float(kick_match.get('spectral_centroid', 300) or 300) if kick_match else 300 - low_centroid_avg = (bass_centroid + kick_centroid) / 2 - - if ref_low_ratio > 0.3: - if low_centroid_avg < 1500: - 
low_end_presence_score = 0.85 + (ref_low_ratio * 0.15) - elif low_centroid_avg < 2500: - low_end_presence_score = 0.65 - else: - low_end_presence_score = 0.35 - weak_roles.append('bass_loop') - else: - low_end_presence_score = 0.7 - else: - if ref_low_ratio > 0.35: - low_end_presence_score = 0.3 - weak_roles.append('bass_loop') - - hat_match = selected.get('hat') - top_match = selected.get('top_loop') - synth_match = selected.get('synth_loop') - if hat_match or top_match or synth_match: - high_centroids = [] - if hat_match: - high_centroids.append(float(hat_match.get('spectral_centroid', 6000) or 6000)) - if top_match: - high_centroids.append(float(top_match.get('spectral_centroid', 5000) or 5000)) - if synth_match: - high_centroids.append(float(synth_match.get('spectral_centroid', 4000) or 4000)) - - avg_high_centroid = sum(high_centroids) / len(high_centroids) if high_centroids else 5000 - - if ref_high_ratio > 0.25: - if avg_high_centroid > 7000: - high_end_brightness_score = 0.85 + (ref_high_ratio * 0.15) - elif avg_high_centroid > 5000: - high_end_brightness_score = 0.65 - else: - high_end_brightness_score = 0.4 - weak_roles.append('hat') - else: - high_end_brightness_score = 0.7 - else: - if ref_high_ratio > 0.3: - high_end_brightness_score = 0.35 - weak_roles.append('hat') - - if kind == 'drop': - if bass_match and ref_energy_mean > 0.6: - section_match_score += 0.08 - if hat_match and ref_onset_rate > 0.4: - section_match_score += 0.05 - elif kind == 'break': - atmos_match = selected.get('atmos_fx') - if atmos_match and ref_energy_mean < 0.45: - section_match_score += 0.10 - low_end_presence_score = min(1.0, low_end_presence_score + 0.1) - elif kind == 'build': - snare_roll_match = selected.get('snare_roll') - fill_match = selected.get('fill_fx') - if snare_roll_match and ref_energy_slope > 0.05: - section_match_score += 0.08 - rhythmic_density_score = min(1.0, rhythmic_density_score + 0.1) - if fill_match: - section_match_score += 0.05 - elif kind == 
'intro': - atmos_match = selected.get('atmos_fx') - if atmos_match: - section_match_score += 0.05 - elif kind == 'outro': - atmos_match = selected.get('atmos_fx') - if atmos_match and ref_energy_mean < 0.4: - section_match_score += 0.05 - - energy_profile_scores.append(energy_profile_score) - spectral_similarity_scores.append(spectral_similarity_score) - rhythmic_density_scores.append(rhythmic_density_score) - low_end_presence_scores.append(low_end_presence_score) - high_end_brightness_scores.append(high_end_brightness_score) - - combined_score = ( - energy_profile_score * 0.20 + - spectral_similarity_score * 0.20 + - rhythmic_density_score * 0.20 + - low_end_presence_score * 0.20 + - high_end_brightness_score * 0.20 - ) - section_match_score = max(section_match_score, combined_score) - section_match_score = max(0.0, min(1.0, section_match_score)) - - section_scores.append({ - 'kind': kind, - 'score': round(section_match_score, 3), - 'weak_roles': weak_roles, - 'energy_profile_score': round(energy_profile_score, 3), - 'spectral_similarity_score': round(spectral_similarity_score, 3), - 'rhythmic_density_score': round(rhythmic_density_score, 3), - 'low_end_presence_score': round(low_end_presence_score, 3), - 'high_end_brightness_score': round(high_end_brightness_score, 3), - }) - - overall_score = sum(s['score'] for s in section_scores) / max(len(section_scores), 1) - - avg_energy_profile = sum(energy_profile_scores) / max(len(energy_profile_scores), 1) - avg_spectral = sum(spectral_similarity_scores) / max(len(spectral_similarity_scores), 1) - avg_rhythmic = sum(rhythmic_density_scores) / max(len(rhythmic_density_scores), 1) - avg_low_end = sum(low_end_presence_scores) / max(len(low_end_presence_scores), 1) - avg_high_end = sum(high_end_brightness_scores) / max(len(high_end_brightness_scores), 1) - - improvement_hints = [] - for section_score in section_scores: - for role in section_score.get('weak_roles', []): - hint = f"{section_score['kind']} section needs better 
{role} samples" - if hint not in improvement_hints: - improvement_hints.append(hint) - - if avg_energy_profile < 0.5: - improvement_hints.append("Overall energy profile mismatch - adjust sample dynamics") - if avg_spectral < 0.5: - improvement_hints.append("Spectral characteristics differ - check brightness/texture match") - if avg_rhythmic < 0.5: - improvement_hints.append("Rhythmic density mismatch - adjust percussive element selection") - if avg_low_end < 0.5: - improvement_hints.append("Low-end presence weak - select bass/kick with more sub energy") - if avg_high_end < 0.5: - improvement_hints.append("High-end brightness lacking - select brighter hat/top samples") - - return { - 'remake_score': round(overall_score, 3), - 'section_scores': [ - { - 'kind': s['kind'], - 'score': s['score'], - 'weak_roles': s['weak_roles'], - } - for s in section_scores - ], - 'improvement_hints': improvement_hints[:10], - 'metric_averages': { - 'energy_similarity': round(avg_energy_profile, 3), - 'spectral_similarity': round(avg_spectral, 3), - 'rhythmic_density': round(avg_rhythmic, 3), - 'low_end_match': round(avg_low_end, 3), - 'high_end_match': round(avg_high_end, 3), - }, - 'metrics_detail': { - 'energy_similarity': { - 'description': 'RMS energy distribution comparison between selected samples and reference section energy', - 'range': '0.0-1.0, higher is better', - 'weight': 0.22, - 'factors': ['average RMS match', 'energy slope for builds', 'peak energy variance'], - }, - 'spectral_similarity': { - 'description': 'Spectral centroid and variance matching', - 'range': '0.0-1.0, higher is better', - 'weight': 0.18, - 'factors': ['centroid mean match', 'centroid variance match'], - }, - 'rhythmic_density': { - 'description': 'Onset rate comparison between selected samples and reference', - 'range': '0.0-1.0, higher is better', - 'weight': 0.22, - 'factors': ['onset rate match', 'percussive element density'], - }, - 'low_end_match': { - 'description': 'Sub-bass and low frequency 
content presence matching', - 'range': '0.0-1.0, higher is better', - 'weight': 0.20, - 'factors': ['bass spectral centroid', 'kick spectral centroid', 'low frequency RMS'], - }, - 'high_end_match': { - 'description': 'High frequency brightness and air content matching', - 'range': '0.0-1.0, higher is better', - 'weight': 0.18, - 'factors': ['hat spectral centroid', 'top loop brightness', 'synth high frequency content'], - }, - }, - } - - -def export_segment_rag_manifest( - manifest: List[Dict[str, Any]], - output_path: Path, - format: str = "json", -) -> None: - output_path = Path(output_path) - output_path.parent.mkdir(parents=True, exist_ok=True) - - if format == "json": - output_path.write_text(json.dumps(manifest, indent=2), encoding="utf-8") - return - - cached = [item for item in manifest if item.get("cached")] - built = [item for item in manifest if not item.get("cached")] - total_cached_segments = sum(int(item.get("segments", 0) or 0) for item in cached) - total_built_segments = sum(int(item.get("segments", 0) or 0) for item in built) - - lines = [ - "# Segment RAG Index Manifest", - "", - f"Generated: {time.strftime('%Y-%m-%d %H:%M:%S')}", - f"Total Files: {len(manifest)}", - "", - "## Summary", - "", - f"- Cached (reused): {len(cached)}", - f"- Built (analyzed): {len(built)}", - f"- Cached segments: {total_cached_segments}", - f"- Built segments: {total_built_segments}", - ] - - if cached: - lines.extend(["", "## Cached Files (Reused)", ""]) - for item in sorted(cached, key=lambda value: value.get("file_name", "").lower()): - lines.append(f"- **{item.get('file_name', 'unknown')}**") - lines.append(f" - Roles: {', '.join(item.get('roles', []))}") - lines.append(f" - Segments: {int(item.get('segments', 0) or 0)}") - - if built: - lines.extend(["", "## Built Files (Analyzed)", ""]) - for item in sorted(built, key=lambda value: value.get("file_name", "").lower()): - lines.append(f"- **{item.get('file_name', 'unknown')}**") - lines.append(f" - Roles: {', 
'.join(item.get('roles', []))}") - lines.append(f" - Segments: {int(item.get('segments', 0) or 0)}") - - output_path.write_text("\n".join(lines) + "\n", encoding="utf-8") - - -def _get_segment_rag_status(library_dir: Path) -> Dict[str, Any]: - """ - Get status of the segment RAG cache with human-readable metadata. - - For each cache file, tries to recover metadata from: - 1. Embedded metadata in the cache file (new format) - 2. indexing_state.json lookup (backfill source) - - Returns stats about metadata coverage and human-readable names. - """ - cache_dir = library_dir / ".segment_rag" - - if not cache_dir.exists(): - return { - "cache_dir": str(cache_dir), - "cache_files": 0, - "total_segments": 0, - "status": "not_built" - } - - cache_files = list(cache_dir.glob("*.json.gz")) - total_segments = 0 - role_coverage: Dict[str, int] = defaultdict(int) - entries: List[Dict[str, Any]] = [] - state_path = cache_dir / "indexing_state.json" - state_payload: Dict[str, Any] = {} - if state_path.exists(): - try: - state_payload = json.loads(state_path.read_text(encoding="utf-8")) - except Exception: - state_payload = {} - indexed_entries = state_payload.get("indexed_entries", {}) or {} - - # Build lookup by cache_prefix for state entries - by_prefix: Dict[str, Dict[str, Any]] = {} - for entry in indexed_entries.values(): - cache_prefix = entry.get("cache_prefix") - if cache_prefix: - by_prefix[cache_prefix] = entry - - # Track metadata coverage - files_with_embedded_metadata = 0 - files_with_state_metadata = 0 - files_without_metadata = 0 - - for cache_file in cache_files: - try: - with gzip.open(cache_file, "rt", encoding="utf-8") as handle: - payload = json.load(handle) - if isinstance(payload, list): - segments = payload - metadata = {} - elif isinstance(payload, dict): - segments = payload.get("segments", []) or [] - metadata = payload.get("metadata", {}) or {} - else: - segments = [] - metadata = {} - total_segments += len(segments) - - # Extract cache prefix from 
filename (format: {path_key}__{fingerprint}__{windows}__{duration}.json.gz) - cache_stem = cache_file.name[:-8] if cache_file.name.endswith(".json.gz") else cache_file.stem - cache_prefix = cache_stem.rsplit("__", 1)[0] - - # Look up metadata from state file - state_entry = by_prefix.get(cache_prefix, {}) - - # Determine metadata source - has_embedded = bool(metadata) - has_state = bool(state_entry) - - if has_embedded: - files_with_embedded_metadata += 1 - elif has_state: - files_with_state_metadata += 1 - else: - files_without_metadata += 1 - - # Merge metadata: prefer embedded, fallback to state - file_name = metadata.get("file_name") or state_entry.get("file_name") or cache_file.name - file_path = metadata.get("path") or state_entry.get("path") or "" - roles = metadata.get("roles") or state_entry.get("roles") or [] - - # Determine if the name is human-readable (not just a hash) - is_hash_name = len(cache_stem.split("__")[0]) == 16 and all(c in "0123456789abcdef" for c in cache_stem.split("__")[0]) - has_human_name = file_name != cache_file.name and not is_hash_name - - for role in roles: - if role: - role_coverage[role] += len(segments) - - mtime = cache_file.stat().st_mtime - - entries.append({ - "file_name": file_name, - "path": file_path, - "segments": len(segments), - "mtime": mtime, - "cache_file": cache_file.name, - "roles": roles, - "has_embedded_metadata": has_embedded, - "has_human_readable_name": has_human_name, - }) - except Exception: - logger.debug("Failed to inspect segment cache %s", cache_file, exc_info=True) - - entries.sort(key=lambda item: item["mtime"], reverse=True) - - # Calculate cache size - cache_size_bytes = sum(f.stat().st_size for f in cache_files) - cache_size_mb = round(cache_size_bytes / (1024 * 1024), 2) - - return { - "cache_dir": str(cache_dir), - "cache_files": len(cache_files), - "total_segments": total_segments, - "role_coverage": dict(role_coverage) if role_coverage else {}, - "newest_entries": entries[:5], - 
"oldest_entries": entries[-5:] if len(entries) > 5 else [], - "metadata_coverage": { - "files_with_embedded_metadata": files_with_embedded_metadata, - "files_with_state_metadata": files_with_state_metadata, - "files_without_metadata": files_without_metadata, - }, - "cache_size_mb": cache_size_mb, - "indexing_complete": state_payload.get("complete", False), - "last_indexed": state_payload.get("timestamp"), - "status": "ok" - } - - -def _backfill_segment_cache_metadata(library_dir: Path, force: bool = False) -> Dict[str, Any]: - """ - Backfill metadata into existing segment cache files. - - For cache files that don't have embedded metadata, this function: - 1. Looks up the file in indexing_state.json - 2. Rewrites the cache file with metadata included - - Args: - library_dir: Path to the audio library - force: If True, rewrite all cache files even if they already have metadata - - Returns: - Dict with backfill statistics - """ - cache_dir = library_dir / ".segment_rag" - - if not cache_dir.exists(): - return { - "cache_dir": str(cache_dir), - "backfilled": 0, - "skipped": 0, - "errors": 0, - "status": "no_cache" - } - - # Load state file for metadata lookup - state_path = cache_dir / "indexing_state.json" - state_payload: Dict[str, Any] = {} - if state_path.exists(): - try: - state_payload = json.loads(state_path.read_text(encoding="utf-8")) - except Exception: - state_payload = {} - - indexed_entries = state_payload.get("indexed_entries", {}) or {} - - # Build lookup by cache_prefix - by_prefix: Dict[str, Dict[str, Any]] = {} - for entry in indexed_entries.values(): - cache_prefix = entry.get("cache_prefix") - if cache_prefix: - by_prefix[cache_prefix] = entry - - cache_files = list(cache_dir.glob("*.json.gz")) - backfilled = 0 - skipped = 0 - errors = 0 - - for cache_file in cache_files: - try: - with gzip.open(cache_file, "rt", encoding="utf-8") as handle: - payload = json.load(handle) - - # Check if already has metadata - if isinstance(payload, dict): - segments 
= payload.get("segments", []) or [] - metadata = payload.get("metadata", {}) or {} - if metadata and not force: - skipped += 1 - continue - elif isinstance(payload, list): - segments = payload - metadata = {} - else: - continue - - # Extract cache prefix from filename - cache_stem = cache_file.name[:-8] if cache_file.name.endswith(".json.gz") else cache_file.stem - cache_prefix = cache_stem.rsplit("__", 1)[0] - - # Look up metadata from state - state_entry = by_prefix.get(cache_prefix, {}) - file_name = metadata.get("file_name") or state_entry.get("file_name") or cache_stem - roles = metadata.get("roles") or state_entry.get("roles") or [] - path = metadata.get("path") or state_entry.get("path") or "" - - # If we found some metadata, rewrite the cache file - if file_name or roles or path: - new_metadata = { - "file_name": file_name, - "path": path, - "roles": roles, - "indexed_at": time.time(), - "backfilled": True, - } - new_payload = { - "segments": segments, - "metadata": new_metadata - } - with gzip.open(cache_file, "wt", encoding="utf-8") as handle: - json.dump(new_payload, handle) - backfilled += 1 - logger.debug("Backfilled metadata for %s", cache_file.name) - else: - skipped += 1 - - except Exception: - errors += 1 - logger.debug("Failed to backfill %s", cache_file, exc_info=True) - - return { - "cache_dir": str(cache_dir), - "cache_files": len(cache_files), - "backfilled": backfilled, - "skipped": skipped, - "errors": errors, - "status": "ok" - } diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_stem_builder.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_stem_builder.py deleted file mode 100644 index fb1a15e..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/reference_stem_builder.py +++ /dev/null @@ -1,264 +0,0 @@ -""" -reference_stem_builder.py - Rebuild an Ableton arrangement directly from a reference track. 
-""" - -from __future__ import annotations - -import json -import logging -import socket -from pathlib import Path -from typing import Any, Dict, List, Tuple - -import soundfile as sf -import torch -from demucs.apply import apply_model -from demucs.pretrained import get_model - -try: - import librosa -except ImportError: # pragma: no cover - librosa = None - -try: - from reference_listener import ReferenceAudioListener -except ImportError: # pragma: no cover - from .reference_listener import ReferenceAudioListener - - -logger = logging.getLogger("ReferenceStemBuilder") - -HOST = "127.0.0.1" -PORT = 9877 -MESSAGE_TERMINATOR = b"\n" -SCRIPT_DIR = Path(__file__).resolve().parent -PACKAGE_DIR = SCRIPT_DIR.parent -PROJECT_SAMPLES_DIR = PACKAGE_DIR.parent / "librerias" / "organized_samples" -SAMPLES_DIR = str(PROJECT_SAMPLES_DIR) - -TRACK_LAYOUT = ( - ("REFERENCE FULL", 59, 0.72, True), - ("REF DRUMS", 10, 0.84, False), - ("REF BASS", 30, 0.82, False), - ("REF OTHER", 50, 0.68, False), - ("REF VOCALS", 40, 0.70, False), -) - -SECTION_BLUEPRINTS = { - "club": [ - ("INTRO DJ", 16), - ("GROOVE A", 16), - ("VOCAL BUILD", 8), - ("DROP A", 16), - ("BREAKDOWN", 8), - ("BUILD B", 8), - ("DROP B", 16), - ("PEAK", 8), - ("OUTRO DJ", 16), - ], - "standard": [ - ("INTRO", 8), - ("BUILD", 8), - ("DROP A", 16), - ("BREAK", 8), - ("DROP B", 16), - ("OUTRO", 8), - ], -} - - -class AbletonSocketClient: - def __init__(self, host: str = HOST, port: int = PORT): - self.host = host - self.port = port - - def send(self, command_type: str, params: Dict[str, Any] | None = None, timeout: float = 30.0) -> Dict[str, Any]: - payload = json.dumps({"type": command_type, "params": params or {}}, separators=(",", ":")).encode("utf-8") + MESSAGE_TERMINATOR - with socket.create_connection((self.host, self.port), timeout=timeout) as sock: - sock.sendall(payload) - data = b"" - while not data.endswith(MESSAGE_TERMINATOR): - chunk = sock.recv(65536) - if not chunk: - break - data += chunk - if not data: - 
raise RuntimeError(f"Sin respuesta para {command_type}") - return json.loads(data.decode("utf-8", errors="replace").strip()) - - -def _resolve_reference_profile(reference_path: Path) -> Dict[str, Any]: - listener = ReferenceAudioListener(SAMPLES_DIR) - analysis = listener.analyze_reference(str(reference_path)) - structure = "club" if analysis.get("duration", 0.0) >= 180 else "standard" - return { - "tempo": float(analysis.get("tempo", 128.0) or 128.0), - "key": str(analysis.get("key", "") or ""), - "duration": float(analysis.get("duration", 0.0) or 0.0), - "structure": structure, - "listener_device": analysis.get("device", "cpu"), - } - - -def ensure_reference_wav(reference_path: Path) -> Path: - if reference_path.suffix.lower() == ".wav": - return reference_path - - if librosa is None: - raise RuntimeError("librosa no está disponible para convertir la referencia a WAV") - - wav_path = reference_path.with_suffix(".wav") - if wav_path.exists() and wav_path.stat().st_size > 0: - return wav_path - - y, sr = librosa.load(str(reference_path), sr=44100, mono=False) - if y.ndim == 1: - y = y.reshape(1, -1) - sf.write(str(wav_path), y.T, sr, subtype="PCM_16") - return wav_path - - -def separate_stems(reference_wav: Path, output_dir: Path) -> Dict[str, Path]: - output_dir.mkdir(parents=True, exist_ok=True) - stem_root = output_dir / reference_wav.stem - expected = { - "reference": reference_wav, - "drums": stem_root / "drums.wav", - "bass": stem_root / "bass.wav", - "other": stem_root / "other.wav", - "vocals": stem_root / "vocals.wav", - } - if all(path.exists() and path.stat().st_size > 0 for path in expected.values()): - return expected - - audio, sr = sf.read(str(reference_wav), always_2d=True) - if sr != 44100: - raise RuntimeError(f"Sample rate inesperado en referencia WAV: {sr}") - - model = get_model("htdemucs") - model.cpu() - model.eval() - waveform = torch.tensor(audio.T, dtype=torch.float32) - separated = apply_model(model, waveform[None], device="cpu", 
progress=False)[0] - - stem_root.mkdir(parents=True, exist_ok=True) - for stem_name, tensor in zip(model.sources, separated): - stem_path = stem_root / f"{stem_name}.wav" - sf.write(str(stem_path), tensor.detach().cpu().numpy().T, sr, subtype="PCM_16") - - return expected - - -def _sections_for_structure(structure: str) -> List[Tuple[str, int]]: - return list(SECTION_BLUEPRINTS.get(structure.lower(), SECTION_BLUEPRINTS["standard"])) - - -def _create_track(client: AbletonSocketClient, name: str, color: int, volume: float) -> int: - response = client.send("create_track", {"type": "audio", "index": -1}) - if response.get("status") != "success": - raise RuntimeError(response.get("message", f"No se pudo crear {name}")) - track_index = int(response.get("result", {}).get("index")) - client.send("set_track_name", {"index": track_index, "name": name}) - client.send("set_track_color", {"index": track_index, "color": color}) - client.send("set_track_volume", {"index": track_index, "volume": volume}) - return track_index - - -def _import_full_length_audio(client: AbletonSocketClient, track_index: int, file_path: Path, name: str) -> None: - response = client.send("create_arrangement_audio_pattern", { - "track_index": track_index, - "file_path": str(file_path), - "positions": [0.0], - "name": name, - }, timeout=120.0) - if response.get("status") != "success": - raise RuntimeError(response.get("message", f"No se pudo importar {name}")) - - -def _prepare_navigation_scenes(client: AbletonSocketClient, structure: str) -> None: - sections = _sections_for_structure(structure) - session_info = client.send("get_session_info") - if session_info.get("status") != "success": - return - - scene_count = int(session_info.get("result", {}).get("num_scenes", 0) or 0) - target_count = len(sections) - - while scene_count < target_count: - create_response = client.send("create_scene", {"index": -1}) - if create_response.get("status") != "success": - break - scene_count += 1 - - while scene_count > 
target_count and scene_count > 1: - delete_response = client.send("delete_scene", {"index": scene_count - 1}) - if delete_response.get("status") != "success": - break - scene_count -= 1 - - for scene_index, (section_name, _) in enumerate(sections): - client.send("set_scene_name", {"index": scene_index, "name": section_name}) - - -def rebuild_project_from_reference(reference_path: Path) -> Dict[str, Any]: - reference_path = reference_path.resolve() - if not reference_path.exists(): - raise FileNotFoundError(reference_path) - - profile = _resolve_reference_profile(reference_path) - reference_wav = ensure_reference_wav(reference_path) - stems = separate_stems(reference_wav, reference_path.parent / "stems") - - client = AbletonSocketClient() - clear_response = client.send("clear_project", {"keep_tracks": 0}, timeout=120.0) - if clear_response.get("status") != "success": - raise RuntimeError(clear_response.get("message", "No se pudo limpiar el proyecto")) - - client.send("stop", {}) - client.send("set_tempo", {"tempo": round(profile["tempo"], 3)}) - client.send("show_arrangement_view", {}) - client.send("jump_to", {"time": 0}) - - created = [] - for (track_name, color, volume, muted), stem_key in zip(TRACK_LAYOUT, ("reference", "drums", "bass", "other", "vocals")): - track_index = _create_track(client, track_name, color, volume) - _import_full_length_audio(client, track_index, stems[stem_key], track_name) - if muted: - client.send("set_track_mute", {"index": track_index, "mute": True}) - created.append({ - "track_index": track_index, - "name": track_name, - "file_path": str(stems[stem_key]), - }) - - _prepare_navigation_scenes(client, profile["structure"]) - client.send("loop_selection", {"start": 0, "length": max(32.0, round(profile["duration"] * profile["tempo"] / 60.0, 3)), "enable": False}) - client.send("jump_to", {"time": 0}) - client.send("show_arrangement_view", {}) - - session_info = client.send("get_session_info") - return { - "reference": str(reference_path), 
- "tempo": profile["tempo"], - "key": profile["key"], - "structure": profile["structure"], - "listener_device": profile["listener_device"], - "stems": created, - "session_info": session_info.get("result", {}), - } - - -def main() -> int: - import argparse - - parser = argparse.ArgumentParser(description="Rebuild an Ableton project directly from a reference track.") - parser.add_argument("reference_path", help="Absolute or relative path to the reference audio file") - args = parser.parse_args() - - result = rebuild_project_from_reference(Path(args.reference_path)) - print(json.dumps(result, indent=2, ensure_ascii=False)) - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/requirements.txt b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/requirements.txt deleted file mode 100644 index cf2a8b2..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Dependencias de AbletonMCP-AI Server -# Instalar con: pip install -r requirements.txt - -mcp>=1.0.0 -# Servidor MCP FastMCP - -# Opcional: para análisis de audio avanzado -# numpy>=1.24.0 -# librosa>=0.10.0 - -# Opcional: para procesamiento con GPU AMD -# torch==2.4.1 -# torch-directml>=0.2.5 diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/retrieval_benchmark.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/retrieval_benchmark.py deleted file mode 100644 index 5224785..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/retrieval_benchmark.py +++ /dev/null @@ -1,525 +0,0 @@ -""" -retrieval_benchmark.py - Offline benchmark harness for retrieval quality inspection. - -Analyzes reference tracks and outputs top-N candidates per role to help spot -role contamination and evaluate retrieval quality. 
- -Usage: - python retrieval_benchmark.py --reference "path/to/track.mp3" - python retrieval_benchmark.py --reference "track1.mp3" "track2.mp3" --top-n 10 - python retrieval_benchmark.py --reference "track.mp3" --output results.json --format json - python retrieval_benchmark.py --reference "track.mp3" --output results.md --format markdown -""" - -from __future__ import annotations - -import argparse -import json -import logging -import sys -import time -from collections import defaultdict -from pathlib import Path -from typing import Any, Dict, List, Optional - -# Add parent directory to path for imports when running as script -sys.path.insert(0, str(Path(__file__).parent)) - -from reference_listener import ReferenceAudioListener, ROLE_SEGMENT_SETTINGS - -logger = logging.getLogger(__name__) - - -def _default_library_dir() -> Path: - """Get the default library directory.""" - return Path(__file__).resolve().parents[2] / "librerias" / "all_tracks" - - -def run_benchmark( - reference_paths: List[str], - library_dir: Path, - top_n: int = 10, - roles: Optional[List[str]] = None, - duration_limit: Optional[float] = None, -) -> Dict[str, Any]: - """ - Run retrieval benchmark on one or more reference tracks. 
- - Args: - reference_paths: List of paths to reference audio files - library_dir: Path to the sample library - top_n: Number of top candidates to show per role - roles: Optional list of specific roles to analyze - duration_limit: Optional duration limit for analysis - - Returns: - Dict containing benchmark results for each reference - """ - listener = ReferenceAudioListener(str(library_dir)) - - all_roles = list(ROLE_SEGMENT_SETTINGS.keys()) - target_roles = [r for r in (roles or all_roles) if r in all_roles] - - results = { - "benchmark_info": { - "library_dir": str(library_dir), - "top_n": top_n, - "roles": target_roles, - "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"), - "device": listener.device_name, - }, - "references": [], - } - - for ref_path in reference_paths: - ref_path = Path(ref_path) - if not ref_path.exists(): - logger.warning("Reference file not found: %s", ref_path) - continue - - logger.info("Analyzing reference: %s", ref_path.name) - - try: - start_time = time.time() - - # Run match_assets to get candidates per role - match_result = listener.match_assets(str(ref_path)) - reference_info = match_result.get("reference", {}) - matches = match_result.get("matches", {}) - - elapsed = time.time() - start_time - - ref_result = { - "file_name": ref_path.name, - "path": str(ref_path), - "analysis_time_seconds": round(elapsed, 2), - "reference_info": { - "tempo": reference_info.get("tempo"), - "key": reference_info.get("key"), - "duration": reference_info.get("duration"), - "rms_mean": reference_info.get("rms_mean"), - "onset_mean": reference_info.get("onset_mean"), - "spectral_centroid": reference_info.get("spectral_centroid"), - }, - "sections": [ - { - "kind": s.get("kind"), - "start": s.get("start"), - "end": s.get("end"), - "bars": s.get("bars"), - } - for s in match_result.get("reference_sections", []) - ], - "role_candidates": {}, - } - - # Process each role - for role in target_roles: - role_matches = matches.get(role, []) - top_candidates = 
role_matches[:top_n] - - ref_result["role_candidates"][role] = { - "total_available": len(role_matches), - "top_candidates": [ - { - "rank": i + 1, - "file_name": c.get("file_name"), - "path": c.get("path"), - "score": c.get("score"), - "cosine": c.get("cosine"), - "segment_score": c.get("segment_score"), - "catalog_score": c.get("catalog_score"), - "tempo": c.get("tempo"), - "key": c.get("key"), - "duration": c.get("duration"), - } - for i, c in enumerate(top_candidates) - ], - } - - results["references"].append(ref_result) - logger.info("Completed analysis in %.2fs", elapsed) - - except Exception as e: - logger.error("Failed to analyze %s: %s", ref_path, e, exc_info=True) - results["references"].append({ - "file_name": ref_path.name, - "path": str(ref_path), - "error": str(e), - }) - - return results - - -def analyze_role_contamination(results: Dict[str, Any]) -> Dict[str, Any]: - """ - Analyze results for potential role contamination issues. - - Returns a dict with contamination analysis: - - files appearing in multiple roles - - misnamed files (e.g., "bass" appearing in "kick" role) - - score distribution anomalies - """ - contamination = { - "cross_role_files": [], - "potential_mismatches": [], - "role_score_stats": {}, - } - - # Track files appearing in multiple roles - file_to_roles: Dict[str, List[Dict[str, Any]]] = defaultdict(list) - - for ref in results.get("references", []): - ref_name = ref.get("file_name", "unknown") - - for role, role_data in ref.get("role_candidates", {}).items(): - for candidate in role_data.get("top_candidates", []): - file_name = candidate.get("file_name", "") - if file_name: - file_to_roles[file_name].append({ - "reference": ref_name, - "role": role, - "rank": candidate.get("rank"), - "score": candidate.get("score"), - }) - - # Find files appearing in multiple roles - for file_name, appearances in file_to_roles.items(): - unique_roles = set(a["role"] for a in appearances) - if len(unique_roles) > 1: - 
contamination["cross_role_files"].append({ - "file_name": file_name, - "roles": list(unique_roles), - "appearances": appearances, - }) - - # Check for potential mismatches (filename suggests different role) - role_keywords = { - "kick": ["kick"], - "snare": ["snare", "clap"], - "hat": ["hat", "hihat", "hi-hat"], - "bass_loop": ["bass", "sub", "808"], - "perc_loop": ["perc", "percussion", "conga", "bongo"], - "top_loop": ["top", "drum loop", "full drum"], - "synth_loop": ["synth", "lead", "pad", "chord", "arp"], - "vocal_loop": ["vocal", "vox", "acapella"], - "crash_fx": ["crash", "cymbal", "impact"], - "fill_fx": ["fill", "transition", "tom"], - "snare_roll": ["roll", "snareroll"], - "atmos_fx": ["atmos", "drone", "ambient", "texture"], - "vocal_shot": ["shot", "vocal shot", "chop"], - } - - for ref in results.get("references", []): - for role, role_data in ref.get("role_candidates", {}).items(): - for candidate in role_data.get("top_candidates", []): - file_name = candidate.get("file_name", "").lower() - if not file_name: - continue - - # Check if file name suggests a different role - expected_keywords = role_keywords.get(role, []) - other_role_matches = [] - - for other_role, keywords in role_keywords.items(): - if other_role == role: - continue - if any(kw in file_name for kw in keywords): - other_role_matches.append(other_role) - - if other_role_matches and expected_keywords: - # File name matches another role but not this one - if not any(kw in file_name for kw in expected_keywords): - contamination["potential_mismatches"].append({ - "file_name": candidate.get("file_name"), - "assigned_role": role, - "rank": candidate.get("rank"), - "score": candidate.get("score"), - "suggested_roles": other_role_matches, - }) - - # Calculate score distribution per role - for ref in results.get("references", []): - for role, role_data in ref.get("role_candidates", {}).items(): - scores = [ - c.get("score", 0) - for c in role_data.get("top_candidates", []) - if c.get("score") 
is not None - ] - - if scores: - contamination["role_score_stats"][role] = { - "min": round(min(scores), 4), - "max": round(max(scores), 4), - "avg": round(sum(scores) / len(scores), 4), - "count": len(scores), - } - - return contamination - - -def format_output_json(results: Dict[str, Any]) -> str: - """Format results as JSON string.""" - return json.dumps(results, indent=2, ensure_ascii=False) - - -def format_output_markdown(results: Dict[str, Any]) -> str: - """Format results as markdown string.""" - lines = [] - - # Header - lines.append("# Retrieval Benchmark Report") - lines.append("") - lines.append(f"**Generated:** {results['benchmark_info']['timestamp']}") - lines.append(f"**Library:** `{results['benchmark_info']['library_dir']}`") - lines.append(f"**Top N:** {results['benchmark_info']['top_n']}") - lines.append(f"**Device:** {results['benchmark_info']['device']}") - lines.append("") - - # Process each reference - for ref in results.get("references", []): - lines.append(f"## Reference: {ref.get('file_name', 'unknown')}") - lines.append("") - - # Error case - if "error" in ref: - lines.append(f"**Error:** {ref['error']}") - lines.append("") - continue - - # Reference info - ref_info = ref.get("reference_info", {}) - lines.append("### Reference Analysis") - lines.append("") - lines.append("| Property | Value |") - lines.append("|----------|-------|") - lines.append(f"| Tempo | {ref_info.get('tempo', 'N/A')} BPM |") - lines.append(f"| Key | {ref_info.get('key', 'N/A')} |") - lines.append(f"| Duration | {ref_info.get('duration', 'N/A')}s |") - lines.append(f"| RMS Mean | {ref_info.get('rms_mean', 'N/A')} |") - lines.append(f"| Onset Mean | {ref_info.get('onset_mean', 'N/A')} |") - lines.append(f"| Spectral Centroid | {ref_info.get('spectral_centroid', 'N/A')} Hz |") - lines.append("") - - # Sections - sections = ref.get("sections", []) - if sections: - lines.append("### Detected Sections") - lines.append("") - lines.append("| Type | Start | End | Bars |") - 
lines.append("|------|-------|-----|------|") - for s in sections: - lines.append(f"| {s.get('kind', 'N/A')} | {s.get('start', 'N/A')}s | {s.get('end', 'N/A')}s | {s.get('bars', 'N/A')} |") - lines.append("") - - # Role candidates - lines.append("### Top Candidates per Role") - lines.append("") - - for role, role_data in ref.get("role_candidates", {}).items(): - total = role_data.get("total_available", 0) - lines.append(f"#### {role} ({total} available)") - lines.append("") - - candidates = role_data.get("top_candidates", []) - if not candidates: - lines.append("*No candidates found*") - lines.append("") - continue - - lines.append("| Rank | File | Score | Cosine | Seg | Catalog | Tempo | Key | Duration |") - lines.append("|------|------|-------|--------|-----|---------|-------|-----|----------|") - - for c in candidates: - lines.append( - f"| {c.get('rank', 'N/A')} | " - f"`{c.get('file_name', 'N/A')[:40]}` | " - f"{c.get('score', 0):.4f} | " - f"{c.get('cosine', 0):.4f} | " - f"{c.get('segment_score', 0):.4f} | " - f"{c.get('catalog_score', 0):.4f} | " - f"{c.get('tempo', 'N/A')} | " - f"{c.get('key', 'N/A')} | " - f"{c.get('duration', 'N/A'):.2f}s |" - ) - lines.append("") - - # Contamination analysis - if "contamination_analysis" in results: - contam = results["contamination_analysis"] - lines.append("## Role Contamination Analysis") - lines.append("") - - # Cross-role files - cross_role = contam.get("cross_role_files", []) - if cross_role: - lines.append("### Files Appearing in Multiple Roles") - lines.append("") - for item in cross_role: - lines.append(f"- **{item['file_name']}**") - lines.append(f" - Roles: {', '.join(item['roles'])}") - for app in item["appearances"]: - lines.append(f" - {app['role']}: rank {app['rank']}, score {app['score']:.4f}") - lines.append("") - - # Potential mismatches - mismatches = contam.get("potential_mismatches", []) - if mismatches: - lines.append("### Potential Role Mismatches") - lines.append("") - lines.append("Files whose 
names suggest a different role than assigned:") - lines.append("") - for item in mismatches: - lines.append(f"- **{item['file_name']}**") - lines.append(f" - Assigned: {item['assigned_role']} (rank {item['rank']}, score {item['score']:.4f})") - lines.append(f" - Suggested: {', '.join(item['suggested_roles'])}") - lines.append("") - - # Score stats - score_stats = contam.get("role_score_stats", {}) - if score_stats: - lines.append("### Score Distribution per Role") - lines.append("") - lines.append("| Role | Min | Max | Avg | Count |") - lines.append("|------|-----|-----|-----|-------|") - for role, stats in sorted(score_stats.items()): - lines.append( - f"| {role} | {stats['min']:.4f} | {stats['max']:.4f} | " - f"{stats['avg']:.4f} | {stats['count']} |" - ) - lines.append("") - - return "\n".join(lines) - - -def main() -> int: - parser = argparse.ArgumentParser( - description="Offline benchmark harness for retrieval quality inspection.", - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - %(prog)s --reference "track.mp3" - %(prog)s --reference "track1.mp3" "track2.mp3" --top-n 15 - %(prog)s --reference "track.mp3" --output results.md --format markdown - %(prog)s --reference "track.mp3" --roles kick snare hat --top-n 20 - """, - ) - - parser.add_argument( - "--reference", "-r", - nargs="+", - required=True, - help="One or more reference audio files to analyze", - ) - parser.add_argument( - "--library-dir", - default=str(_default_library_dir()), - help="Audio library directory (default: ../librerias/all_tracks)", - ) - parser.add_argument( - "--top-n", "-n", - type=int, - default=10, - help="Number of top candidates to show per role (default: 10)", - ) - parser.add_argument( - "--roles", - nargs="*", - default=None, - help="Specific roles to analyze (default: all roles)", - ) - parser.add_argument( - "--output", "-o", - type=str, - default=None, - help="Output file path for results", - ) - parser.add_argument( - "--format", "-f", - 
choices=["json", "markdown", "md"], - default=None, - help="Output format (json or markdown). Auto-detected from output file extension if not specified.", - ) - parser.add_argument( - "--analyze-contamination", - action="store_true", - help="Include role contamination analysis in output", - ) - parser.add_argument( - "--verbose", "-v", - action="store_true", - help="Enable verbose logging", - ) - parser.add_argument( - "--duration-limit", - type=float, - default=None, - help="Optional duration limit for audio analysis", - ) - - args = parser.parse_args() - - # Configure logging - if args.verbose: - logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(message)s") - else: - logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") - - # Validate reference files - reference_paths = [] - for ref in args.reference: - ref_path = Path(ref) - if ref_path.exists(): - reference_paths.append(str(ref_path)) - else: - logger.warning("Reference file not found: %s", ref) - - if not reference_paths: - logger.error("No valid reference files provided") - return 1 - - # Run benchmark - logger.info("Running retrieval benchmark on %d reference(s)", len(reference_paths)) - - results = run_benchmark( - reference_paths=reference_paths, - library_dir=Path(args.library_dir), - top_n=args.top_n, - roles=args.roles, - duration_limit=args.duration_limit, - ) - - # Add contamination analysis if requested - if args.analyze_contamination: - logger.info("Analyzing role contamination...") - results["contamination_analysis"] = analyze_role_contamination(results) - - # Determine output format - output_format = args.format - if output_format is None and args.output: - output_format = "markdown" if args.output.endswith(".md") else "json" - output_format = output_format or "text" - - # Format output - if output_format in ("markdown", "md"): - output_text = format_output_markdown(results) - elif output_format == "json": - output_text = format_output_json(results) - else: - 
# Plain text summary - output_text = format_output_markdown(results) - - # Write to file or stdout - if args.output: - output_path = Path(args.output) - output_path.parent.mkdir(parents=True, exist_ok=True) - output_path.write_text(output_text, encoding="utf-8") - logger.info("Results written to: %s", output_path) - else: - print(output_text) - - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/role_matcher.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/role_matcher.py deleted file mode 100644 index a2a79a8..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/role_matcher.py +++ /dev/null @@ -1,469 +0,0 @@ -""" -role_matcher.py - Phase 4: Role validation and sample matching utilities - -This module provides enhanced role matching for sample selection with: -- Role validation based on audio characteristics -- Aggressive sample detection and filtering -- Logging of matching decisions -- Integration with reference_listener and sample_selector -""" - -import logging -from typing import Any, Dict, List, Optional - -logger = logging.getLogger("RoleMatcher") - - -# ============================================================================ -# CONSTANTS -# ============================================================================ - -# Valid roles for sample matching with their expected characteristics -VALID_ROLES = { - # One-shot drums - "kick": {"max_duration": 2.0, "min_onset": 0.3, "is_loop": False, "bus": "drums"}, - "snare": {"max_duration": 2.0, "min_onset": 0.25, "is_loop": False, "bus": "drums"}, - "hat": {"max_duration": 1.5, "min_onset": 0.2, "is_loop": False, "bus": "drums"}, - "clap": {"max_duration": 2.0, "min_onset": 0.25, "is_loop": False, "bus": "drums"}, - "ride": {"max_duration": 3.0, "min_onset": 0.15, "is_loop": False, "bus": "drums"}, - "perc": {"max_duration": 2.5, "min_onset": 0.2, "is_loop": False, "bus": "drums"}, - # Loops - 
"bass_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "bass"}, - "perc_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "drums"}, - "top_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "drums"}, - "synth_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "music"}, - "vocal_loop": {"min_duration": 2.0, "max_duration": 16.0, "is_loop": True, "bus": "vocal"}, - # FX - "crash_fx": {"max_duration": 4.0, "is_loop": False, "bus": "fx"}, - "fill_fx": {"max_duration": 8.0, "is_loop": False, "bus": "fx"}, - "snare_roll": {"max_duration": 8.0, "is_loop": False, "bus": "drums"}, - "atmos_fx": {"min_duration": 4.0, "is_loop": True, "bus": "fx"}, - "vocal_shot": {"max_duration": 3.0, "is_loop": False, "bus": "vocal"}, - # Resample layers - "resample_reverse": {"is_loop": False, "bus": "fx"}, - "resample_riser": {"is_loop": False, "bus": "fx"}, - "resample_downlifter": {"is_loop": False, "bus": "fx"}, - "resample_stutter": {"is_loop": False, "bus": "vocal"}, -} - -# Keywords that indicate aggressive/hard samples that may be misclassified -AGGRESSIVE_KEYWORDS = { - # Very aggressive kick patterns - "hard", "distorted", "industrial", "slam", "punch", "brutal", - # Potentially misclassified - "subdrop", "impact", "explosion", "destroy", -} - -# Keywords that are acceptable for aggressive genres -GENRE_APPROPRIATE_AGGRESSIVE = { - "industrial-techno", "hard-techno", "raw-techno", "psytrance", "dark-techno" -} - -# Role aliases for flexible matching -ROLE_ALIASES = { - "kick": ["kick", "bd", "bassdrum", "bass_drum"], - "snare": ["snare", "sd", "snr"], - "clap": ["clap", "cp", "handclap"], - "hat": ["hat", "hihat", "hi_hat", "hhat", "closed_hat", "hat_closed"], - "hat_open": ["open_hat", "hat_open", "ohat", "openhihat"], - "ride": ["ride", "rd", "cymbal"], - "perc": ["perc", "percussion", "percs"], - "bass_loop": ["bass_loop", "bassloop", "bass loop", "sub_bass"], - "perc_loop": 
["perc_loop", "percloop", "percussion loop", "perc loop"], - "top_loop": ["top_loop", "toploop", "top loop", "full_drum"], - "synth_loop": ["synth_loop", "synthloop", "synth loop", "chord_loop", "stab"], - "vocal_loop": ["vocal_loop", "vocalloop", "vocal loop", "vox_loop", "vox"], - "crash_fx": ["crash", "crash_fx", "crashfx", "impact_fx"], - "fill_fx": ["fill", "fill_fx", "fillfx", "tom_fill", "transition"], - "snare_roll": ["snare_roll", "snareroll", "snare roll", "snr_roll"], - "atmos_fx": ["atmos", "atmos_fx", "atmosfx", "drone", "pad_fx"], - "vocal_shot": ["vocal_shot", "vocalshot", "vocal shot", "vocal_one_shot"], -} - -# Minimum score thresholds for role matching -ROLE_SCORE_THRESHOLDS = { - "kick": 0.35, - "snare": 0.32, - "hat": 0.30, - "clap": 0.32, - "bass_loop": 0.38, - "perc_loop": 0.35, - "top_loop": 0.35, - "synth_loop": 0.36, - "vocal_loop": 0.38, - "crash_fx": 0.30, - "fill_fx": 0.32, - "snare_roll": 0.30, - "atmos_fx": 0.32, - "vocal_shot": 0.34, -} - - -# ============================================================================ -# VALIDATION FUNCTIONS -# ============================================================================ - -def validate_role_for_sample( - role: str, - sample_data: Dict[str, Any], - genre: Optional[str] = None, -) -> Dict[str, Any]: - """ - Validates if a sample is appropriate for a given role. 
- - Args: - role: The role to validate for (e.g., 'kick', 'bass_loop') - sample_data: Sample metadata with keys like 'duration', 'onset_mean', 'file_name', 'rms_mean' - genre: Optional genre for context-aware aggressive sample handling - - Returns: - Dict with keys: - - 'valid' (bool): Whether the sample passes validation - - 'score' (float): Raw validation score (0.0-1.0) - - 'warnings' (list): List of warning messages - - 'adjusted_score' (float): Score after penalties - """ - if role not in VALID_ROLES: - return {"valid": True, "score": 0.5, "warnings": [f"Unknown role: {role}"], "adjusted_score": 0.5} - - role_config = VALID_ROLES[role] - warnings: List[str] = [] - score = 1.0 - - duration = float(sample_data.get("duration", 0.0) or 0.0) - onset = float(sample_data.get("onset_mean", 0.0) or 0.0) - file_name = str(sample_data.get("file_name", "") or "").lower() - rms = float(sample_data.get("rms_mean", 0.0) or 0.0) - - # Duration validation - if role_config.get("is_loop"): - min_dur = role_config.get("min_duration", 2.0) - max_dur = role_config.get("max_duration", 16.0) - if duration < min_dur: - warnings.append(f"Duration {duration:.1f}s too short for loop role (min {min_dur}s)") - score *= 0.7 - elif max_dur and duration > max_dur: - warnings.append(f"Duration {duration:.1f}s too long for role (max {max_dur}s)") - score *= 0.85 - else: - max_dur = role_config.get("max_duration", 3.0) - if duration > max_dur: - warnings.append(f"Duration {duration:.1f}s too long for one-shot role (max {max_dur}s)") - score *= 0.75 - if "loop" in file_name and role in ["kick", "snare", "hat", "clap"]: - warnings.append("One-shot role has 'loop' in filename") - score *= 0.65 - - # Onset validation for percussive elements - min_onset = role_config.get("min_onset", 0.0) - if min_onset > 0 and onset < min_onset: - warnings.append(f"Onset {onset:.2f} below minimum {min_onset:.2f}") - score *= 0.85 - - # Check for aggressive samples that might be misclassified - aggressive_penalty = 
1.0 - is_aggressive_genre = genre and genre.lower() in GENRE_APPROPRIATE_AGGRESSIVE - - for keyword in AGGRESSIVE_KEYWORDS: - if keyword in file_name: - if not is_aggressive_genre: - aggressive_penalty *= 0.88 - warnings.append(f"Aggressive keyword '{keyword}' found for non-aggressive genre") - - score *= aggressive_penalty - - # RMS validation for certain roles - if role in ["kick", "snare", "clap"] and rms > 0.4: - warnings.append(f"High RMS {rms:.3f} for one-shot role") - score *= 0.9 - - adjusted_score = max(0.1, min(1.0, score)) - - return { - "valid": score >= 0.4, - "score": score, - "warnings": warnings, - "adjusted_score": adjusted_score, - } - - -def resolve_role_from_alias(alias: str) -> Optional[str]: - """ - Resolves a role name from various aliases. - - Args: - alias: A potential role alias (e.g., 'bd', 'hihat', 'bass loop') - - Returns: - The canonical role name or None if not found - """ - alias_lower = alias.lower().strip().replace("-", "_").replace(" ", "_") - - # Direct match - if alias_lower in VALID_ROLES: - return alias_lower - - # Check aliases - for role, aliases in ROLE_ALIASES.items(): - normalized_aliases = [a.lower().replace("-", "_").replace(" ", "_") for a in aliases] - if alias_lower in normalized_aliases: - return role - - return None - - -def get_bus_for_role(role: str) -> str: - """ - Gets the appropriate bus for a role. 
- - Args: - role: The role name - - Returns: - Bus name ('drums', 'bass', 'music', 'vocal', or 'fx') - """ - if role in VALID_ROLES: - return VALID_ROLES[role].get("bus", "music") - return "music" - - -# ============================================================================ -# LOGGING FUNCTIONS -# ============================================================================ - -def log_matching_decision( - role: str, - selected_sample: Optional[Dict[str, Any]], - candidates_count: int, - final_score: float, - validation_result: Optional[Dict[str, Any]] = None, -) -> None: - """ - Logs detailed matching decisions for debugging and analysis. - - Args: - role: The role being matched - selected_sample: The selected sample dict or None - candidates_count: Number of candidates considered - final_score: The final matching score - validation_result: Optional validation result dict - """ - if not selected_sample: - logger.info( - f"[MATCH] Role '{role}': No sample selected (0/{candidates_count} candidates)" - ) - return - - sample_name = selected_sample.get("file_name", "unknown") - sample_tempo = selected_sample.get("tempo", 0.0) - sample_key = selected_sample.get("key", "N/A") - sample_dur = selected_sample.get("duration", 0.0) - - log_parts = [ - f"[MATCH] Role '{role}':", - f"Sample: {sample_name}", - f"Score: {final_score:.3f}", - f"Tempo: {sample_tempo:.1f}", - f"Key: {sample_key}", - f"Duration: {sample_dur:.1f}s", - f"Candidates: {candidates_count}", - ] - - if validation_result: - warnings = validation_result.get("warnings", []) - if warnings: - log_parts.append(f"Warnings: {', '.join(warnings)}") - log_parts.append(f"Validated: {validation_result.get('valid', True)}") - - logger.info(" | ".join(log_parts)) - - -# ============================================================================ -# ENHANCEMENT FUNCTIONS -# ============================================================================ - -def enhance_sample_matching( - matches: Dict[str, List[Dict[str, 
Any]]], - reference: Dict[str, Any], - genre: Optional[str] = None, -) -> Dict[str, List[Dict[str, Any]]]: - """ - Enhances sample matching results with validation and filtering. - - This function takes raw matches from reference_listener and applies: - 1. Role validation based on audio characteristics - 2. Aggressive sample filtering - 3. Score adjustment based on validation results - - Args: - matches: Raw matches from reference_listener (role -> list of sample dicts) - reference: Reference track analysis data - genre: Target genre for context-aware filtering - - Returns: - Enhanced matches with validation scores and filtering applied - """ - enhanced: Dict[str, List[Dict[str, Any]]] = {} - - for role, candidates in matches.items(): - if not candidates: - enhanced[role] = [] - continue - - threshold = ROLE_SCORE_THRESHOLDS.get(role, 0.30) - enhanced_candidates: List[Dict[str, Any]] = [] - - for candidate in candidates: - # Create a copy to avoid modifying the original - enhanced_candidate = dict(candidate) - - # Validate the sample for this role - validation = validate_role_for_sample(role, candidate, genre) - enhanced_candidate["validation"] = validation - - # Apply validation penalty to the score - original_score = float(candidate.get("score", 0.0)) - adjusted_score = original_score * validation["adjusted_score"] - enhanced_candidate["adjusted_score"] = round(adjusted_score, 6) - - # Filter out samples below threshold - if adjusted_score >= threshold: - enhanced_candidates.append(enhanced_candidate) - else: - logger.debug( - f"[FILTER] Role '{role}': Filtered out '{candidate.get('file_name', 'unknown')}' " - f"(score {adjusted_score:.3f} < threshold {threshold})" - ) - - # Re-sort by adjusted score - enhanced_candidates.sort(key=lambda x: float(x.get("adjusted_score", 0.0)), reverse=True) - enhanced[role] = enhanced_candidates - - # Log summary - filtered_count = len(candidates) - len(enhanced_candidates) - if filtered_count > 0: - logger.info( - f"[ENHANCE] 
Role '{role}': {len(enhanced_candidates)}/{len(candidates)} candidates passed validation " - f"({filtered_count} filtered out)" - ) - - return enhanced - - -def filter_aggressive_samples( - candidates: List[Dict[str, Any]], - genre: Optional[str] = None, - strict: bool = False, -) -> List[Dict[str, Any]]: - """ - Filters out samples with aggressive keywords unless appropriate for the genre. - - Args: - candidates: List of sample candidate dicts - genre: Target genre - strict: If True, apply stricter filtering - - Returns: - Filtered list of candidates - """ - is_aggressive_genre = genre and genre.lower() in GENRE_APPROPRIATE_AGGRESSIVE - - if is_aggressive_genre: - # For aggressive genres, don't filter aggressive samples - return candidates - - filtered = [] - for candidate in candidates: - file_name = str(candidate.get("file_name", "") or "").lower() - aggressive_count = sum(1 for kw in AGGRESSIVE_KEYWORDS if kw in file_name) - - if strict and aggressive_count > 0: - continue - - # Apply penalty instead of filtering completely - if aggressive_count > 0: - penalty = 0.85 ** aggressive_count - candidate_copy = dict(candidate) - original_score = float(candidate.get("score", 0.0)) - candidate_copy["score"] = original_score * penalty - filtered.append(candidate_copy) - else: - filtered.append(candidate) - - return filtered - - -# ============================================================================ -# INTEGRATION HELPERS -# ============================================================================ - -def create_enhanced_match_report( - role: str, - selected_sample: Optional[Dict[str, Any]], - all_candidates: List[Dict[str, Any]], - validation_result: Optional[Dict[str, Any]] = None, -) -> Dict[str, Any]: - """ - Creates a detailed report for a matching decision. 
- - Args: - role: The role being matched - selected_sample: The selected sample - all_candidates: All candidates that were considered - validation_result: Validation result for the selected sample - - Returns: - A dict with detailed matching report - """ - report = { - "role": role, - "selected": selected_sample is not None, - "candidates_count": len(all_candidates), - "threshold": ROLE_SCORE_THRESHOLDS.get(role, 0.30), - } - - if selected_sample: - report["selected_sample"] = { - "name": selected_sample.get("file_name"), - "path": selected_sample.get("path"), - "score": selected_sample.get("score"), - "adjusted_score": selected_sample.get("adjusted_score"), - "tempo": selected_sample.get("tempo"), - "key": selected_sample.get("key"), - "duration": selected_sample.get("duration"), - } - - if validation_result: - report["validation"] = { - "valid": validation_result.get("valid"), - "score": validation_result.get("score"), - "warnings": validation_result.get("warnings", []), - } - - return report - - -def get_role_info(role: str) -> Dict[str, Any]: - """ - Gets comprehensive information about a role. - - Args: - role: The role name - - Returns: - Dict with role information including valid samples count, thresholds, etc. 
- """ - if role not in VALID_ROLES: - return {"error": f"Unknown role: {role}"} - - config = VALID_ROLES[role] - aliases = ROLE_ALIASES.get(role, []) - - return { - "role": role, - "config": config, - "aliases": aliases, - "threshold": ROLE_SCORE_THRESHOLDS.get(role, 0.30), - "bus": config.get("bus", "music"), - "is_loop": config.get("is_loop", False), - } \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_index.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_index.py deleted file mode 100644 index 186b338..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_index.py +++ /dev/null @@ -1,308 +0,0 @@ -""" -sample_index.py - Índice y búsqueda de samples para AbletonMCP-AI - -Gestiona la librería de samples locales con metadatos extraídos de los nombres. -""" - -import json -import logging -from pathlib import Path -from typing import List, Dict, Any, Optional -import re - -logger = logging.getLogger("SampleIndex") - - -class SampleIndex: - """Índice de samples con búsqueda y metadatos""" - - # Categorías por palabras clave - CATEGORIES = { - 'kick': ['kick', 'bd', 'bass drum', 'kick drum'], - 'snare': ['snare', 'sd', 'snr'], - 'clap': ['clap', 'clp'], - 'hat': ['hat', 'hh', 'hihat', 'hi-hat', 'closed hat', 'open hat'], - 'perc': ['perc', 'percussion', 'conga', 'bongo', 'shaker', 'tamb', 'timb'], - 'bass': ['bass', 'bassline', 'sub', '808', ' Reese'], - 'synth': ['synth', 'lead', 'pad', 'arp', 'pluck', 'stab', 'chord'], - 'vocal': ['vocal', 'vox', 'voice', 'speech', 'talk'], - 'fx': ['fx', 'effect', 'sweep', 'riser', 'downlifter', 'impact', 'hit'], - 'loop': ['loop', 'full', 'groove'], - } - - def __init__(self, base_dir: str): - """ - Inicializa el índice de samples - - Args: - base_dir: Directorio base donde buscar samples - """ - self.base_dir = Path(base_dir) - self.samples: List[Dict[str, Any]] = [] - self.index_file = self.base_dir / ".sample_index.json" - - # Cargar o construir índice - if 
self.index_file.exists(): - self._load_index() - else: - self._build_index() - self._save_index() - - def _build_index(self): - """Construye el índice escaneando el directorio""" - logger.info(f"Construyendo índice de samples en: {self.base_dir}") - - extensions = {'.wav', '.aif', '.aiff', '.mp3', '.ogg'} - - for file_path in self.base_dir.rglob('*'): - if file_path.suffix.lower() in extensions: - sample_info = self._analyze_sample(file_path) - self.samples.append(sample_info) - - logger.info(f"Índice construido: {len(self.samples)} samples encontrados") - - def _analyze_sample(self, file_path: Path) -> Dict[str, Any]: - """Analiza un sample y extrae metadatos del nombre""" - name = file_path.stem - name_lower = name.lower() - - # Determinar categoría - category = self._detect_category(name_lower) - - # Extraer key del nombre - key = self._extract_key(name) - - # Extraer BPM del nombre - bpm = self._extract_bpm(name) - - return { - 'name': name, - 'path': str(file_path), - 'category': category, - 'key': key, - 'bpm': bpm, - 'size': file_path.stat().st_size if file_path.exists() else 0, - } - - def _detect_category(self, name: str) -> str: - """Detecta la categoría basada en palabras clave""" - for category, keywords in self.CATEGORIES.items(): - for keyword in keywords: - if keyword in name: - return category - return 'unknown' - - def _extract_key(self, name: str) -> Optional[str]: - """Extrae la tonalidad del nombre del archivo""" - # Patrones comunes: "Key A", "in A", "A minor", "Am", "F#m", etc. 
- patterns = [ - r'[_\s\-]([A-G][#b]?m?)\s*(?:minor|major)?[_\s\-]?', - r'[_\s\-]([A-G][#b]?)[_\s\-]', - r'\bin\s+([A-G][#b]?m?)\b', - r'Key\s+([A-G][#b]?m?)', - ] - - for pattern in patterns: - match = re.search(pattern, name, re.IGNORECASE) - if match: - key = match.group(1) - # Normalizar - key = key.replace('b', '#').replace('Db', 'C#').replace('Eb', 'D#') - key = key.replace('Gb', 'F#').replace('Ab', 'G#').replace('Bb', 'A#') - return key - - return None - - def _extract_bpm(self, name: str) -> Optional[int]: - """Extrae el BPM del nombre del archivo""" - # Patrones: "128 BPM", "_128_", "128bpm", etc. - patterns = [ - r'[_\s\-](\d{2,3})\s*BPM', - r'[_\s\-](\d{2,3})[_\s\-]', - r'(\d{2,3})bpm', - ] - - for pattern in patterns: - match = re.search(pattern, name, re.IGNORECASE) - if match: - bpm = int(match.group(1)) - if 60 <= bpm <= 200: # Rango razonable - return bpm - - return None - - def _load_index(self): - """Carga el índice desde archivo""" - try: - with open(self.index_file, 'r') as f: - data = json.load(f) - self.samples = data.get('samples', []) - logger.info(f"Índice cargado: {len(self.samples)} samples") - except Exception as e: - logger.error(f"Error cargando índice: {e}") - self._build_index() - - def _save_index(self): - """Guarda el índice a archivo""" - try: - with open(self.index_file, 'w') as f: - json.dump({ - 'samples': self.samples, - 'base_dir': str(self.base_dir) - }, f, indent=2) - logger.info(f"Índice guardado en: {self.index_file}") - except Exception as e: - logger.error(f"Error guardando índice: {e}") - - def search(self, query: str, category: str = "", limit: int = 10) -> List[Dict[str, Any]]: - """ - Busca samples por query y/o categoría - - Args: - query: Término de búsqueda - category: Categoría específica (opcional) - limit: Número máximo de resultados - - Returns: - Lista de samples que coinciden - """ - query_lower = query.lower() - results = [] - - for sample in self.samples: - # Filtrar por categoría si se especificó - if 
category and sample['category'] != category.lower(): - continue - - # Buscar en nombre - name = sample['name'].lower() - if query_lower in name: - # Calcular score de relevancia - score = 0 - if query_lower == sample.get('category', ''): - score += 10 # Coincidencia exacta de categoría - if query_lower in name.split('_'): - score += 5 # Palabra completa - if name.startswith(query_lower): - score += 3 # Comienza con el término - - results.append((score, sample)) - - # Ordenar por score y limitar - results.sort(key=lambda x: x[0], reverse=True) - return [sample for _, sample in results[:limit]] - - def find_by_key(self, key: str, category: str = "", limit: int = 10) -> List[Dict[str, Any]]: - """Busca samples por tonalidad""" - results = [] - - for sample in self.samples: - if sample.get('key') == key: - if not category or sample['category'] == category: - results.append(sample) - - return results[:limit] - - def find_by_bpm(self, bpm: int, tolerance: int = 5, limit: int = 10) -> List[Dict[str, Any]]: - """Busca samples por BPM con tolerancia""" - results = [] - - for sample in self.samples: - sample_bpm = sample.get('bpm') - if sample_bpm and abs(sample_bpm - bpm) <= tolerance: - results.append(sample) - - return results[:limit] - - def get_random_sample(self, category: str = "") -> Optional[Dict[str, Any]]: - """Obtiene un sample aleatorio, opcionalmente filtrado por categoría""" - import random - - samples = self.samples - if category: - samples = [s for s in samples if s['category'] == category] - - return random.choice(samples) if samples else None - - def get_sample_pack(self, genre: str, key: str = "", bpm: int = 0) -> Dict[str, List[Dict]]: - """ - Obtiene un pack de samples completo para un género - - Args: - genre: Género musical - key: Tonalidad preferida - bpm: BPM preferido - - Returns: - Dict con samples organizados por categoría - """ - pack = { - 'kick': [], - 'snare': [], - 'hat': [], - 'clap': [], - 'perc': [], - 'bass': [], - 'synth': [], - 'fx': 
[], - } - - # Seleccionar un sample de cada categoría - for category in pack.keys(): - candidates = [s for s in self.samples if s['category'] == category] - - # Filtrar por key si se especificó - if key and candidates: - key_matches = [s for s in candidates if s.get('key') == key] - if key_matches: - candidates = key_matches - - # Filtrar por BPM si se especificó - if bpm and candidates: - bpm_matches = [s for s in candidates if s.get('bpm')] - if bpm_matches: - # Ordenar por cercanía al BPM objetivo - bpm_matches.sort(key=lambda s: abs(s['bpm'] - bpm)) - candidates = bpm_matches[:5] # Top 5 más cercanos - - # Seleccionar hasta 3 samples - import random - if candidates: - pack[category] = random.sample(candidates, min(3, len(candidates))) - - return pack - - def refresh(self): - """Reconstruye el índice desde cero""" - logger.info("Refrescando índice...") - self._build_index() - self._save_index() - - -# Función de utilidad para testing -if __name__ == "__main__": - import sys - - if len(sys.argv) < 2: - print("Uso: python sample_index.py ") - sys.exit(1) - - logging.basicConfig(level=logging.INFO) - - index = SampleIndex(sys.argv[1]) - - print(f"\nÍndice cargado: {len(index.samples)} samples") - print("\nDistribución por categoría:") - - categories = {} - for sample in index.samples: - cat = sample['category'] - categories[cat] = categories.get(cat, 0) + 1 - - for cat, count in sorted(categories.items(), key=lambda x: -x[1]): - print(f" {cat}: {count}") - - # Ejemplo de búsqueda - print("\nBúsqueda 'kick':") - for s in index.search("kick", limit=5): - print(f" - {s['name']} ({s.get('key', '?')}, {s.get('bpm', '?')} BPM)") diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_manager.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_manager.py deleted file mode 100644 index 8ff4148..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_manager.py +++ /dev/null @@ -1,1010 +0,0 @@ -""" -sample_manager.py - Gestión completa de librería de 
samples - -Proporciona: -- Indexación y escaneo de directorios de samples -- Clasificación automática por tipo, key, BPM -- Gestión de metadatos y tags -- Búsqueda avanzada con filtros múltiples -- Caché de índice para rendimiento -- Soporte para múltiples formatos (WAV, AIFF, MP3, OGG, FLAC) -""" - -import json -import hashlib -import logging -from pathlib import Path -from typing import Dict, List, Any, Optional, Tuple, Callable -from dataclasses import dataclass, field, asdict -from datetime import datetime -from collections import defaultdict -import threading - -# Importar analizador de audio -try: - from .audio_analyzer import AudioAnalyzer, SampleType, analyze_sample, quick_analyze - AUDIO_ANALYSIS_AVAILABLE = True -except ImportError: - try: - from audio_analyzer import AudioAnalyzer, SampleType, analyze_sample, quick_analyze - AUDIO_ANALYSIS_AVAILABLE = True - except ImportError: - AUDIO_ANALYSIS_AVAILABLE = False - AudioAnalyzer = None - SampleType = None - analyze_sample = None - quick_analyze = None - -logger = logging.getLogger("SampleManager") - - -@dataclass -class Sample: - """Representa un sample en la librería""" - id: str - name: str - path: str - category: str - subcategory: str - sample_type: str - key: Optional[str] = None - bpm: Optional[float] = None - duration: float = 0.0 - sample_rate: int = 44100 - channels: int = 2 - file_size: int = 0 - format: str = "wav" - - # Metadatos adicionales - genres: List[str] = field(default_factory=list) - tags: List[str] = field(default_factory=list) - mood: str = "" - energy: float = 0.5 # 0-1 - - # Información de análisis - analyzed: bool = False - analysis_version: int = 0 - spectral_centroid: float = 0.0 - rms_energy: float = 0.0 - is_harmonic: bool = False - is_percussive: bool = False - - # Metadatos del sistema - date_added: str = field(default_factory=lambda: datetime.now().isoformat()) - date_modified: str = field(default_factory=lambda: datetime.now().isoformat()) - play_count: int = 0 - rating: 
int = 0 # 0-5 - - def to_dict(self) -> Dict[str, Any]: - """Convierte el sample a diccionario""" - return asdict(self) - - @classmethod - def from_dict(cls, data: Dict[str, Any]) -> 'Sample': - """Crea un Sample desde un diccionario""" - # Filtrar solo los campos que existen en la clase - valid_fields = {f.name for f in cls.__dataclass_fields__.values()} - filtered_data = {k: v for k, v in data.items() if k in valid_fields} - return cls(**filtered_data) - - def get_display_name(self) -> str: - """Nombre formateado para mostrar""" - parts = [self.name] - if self.key: - parts.append(f"Key: {self.key}") - if self.bpm: - parts.append(f"{self.bpm:.1f} BPM") - return " | ".join(parts) - - -class SampleManager: - """ - Gestor principal de la librería de samples. - - Características: - - Indexación recursiva de directorios - - Clasificación automática por tipo - - Detección de key y BPM (si librosa está disponible) - - Búsqueda avanzada con múltiples filtros - - Sistema de favoritos y ratings - - Caché persistente en JSON - """ - - # Categorías principales y subcategorías - CATEGORIES = { - 'drums': { - 'kick': ['kick', 'bd', 'bass drum', 'kickdrum'], - 'snare': ['snare', 'snr', 'sd', 'rimshot'], - 'clap': ['clap', 'clp', 'handclap'], - 'hat_closed': ['closed hat', 'chh', 'closed'], - 'hat_open': ['open hat', 'ohh', 'open'], - 'hat': ['hat', 'hihat', 'hi-hat'], - 'perc': ['perc', 'percussion', 'conga', 'bongo', 'timbale'], - 'shaker': ['shaker', 'tambourine', 'tamb'], - 'tom': ['tom', 'tomtom'], - 'cymbal': ['crash', 'ride', 'cymbal', 'china'], - }, - 'bass': { - 'sub': ['sub', 'subbass', '808'], - 'bassline': ['bassline', 'bass', 'reese'], - 'acid': ['acid', 'tb303', '303'], - }, - 'synths': { - 'lead': ['lead', 'solo', 'main'], - 'pad': ['pad', 'atmosphere', 'dron', 'ambient'], - 'pluck': ['pluck', 'arp', 'arpeggio'], - 'chord': ['chord', 'stab', 'hit'], - 'fx': ['fx', 'effect', 'sweep', 'riser', 'downlifter'], - }, - 'vocals': { - 'vocal': ['vocal', 'vox', 'voice'], - 
'speech': ['speech', 'talk', 'phrase'], - 'chant': ['chant', 'shout', 'yell'], - }, - 'loops': { - 'drum_loop': ['drum loop', 'beat loop', 'groove'], - 'perc_loop': ['perc loop', 'percussion loop'], - 'bass_loop': ['bass loop', 'bassline loop'], - 'synth_loop': ['synth loop', 'lead loop'], - 'full_loop': ['full loop', 'complete loop'], - }, - 'one_shots': { - 'hit': ['hit', 'impact', 'sting'], - 'noise': ['noise', 'texture', 'grain'], - } - } - - # Mapeo de extensiones de archivo - SUPPORTED_FORMATS = {'.wav', '.aif', '.aiff', '.mp3', '.ogg', '.flac', '.m4a'} - - # Géneros soportados con palabras clave - GENRE_KEYWORDS = { - 'house': ['house', 'deep', 'soulful', 'garage', 'classic'], - 'techno': ['techno', 'industrial', 'detroit', 'berlin', 'acid'], - 'tech-house': ['tech house', 'tech-house', 'groovy', 'bouncy'], - 'trance': ['trance', 'progressive', 'uplifting', 'psy'], - 'drum-and-bass': ['drum and bass', 'dnb', 'neuro', 'liquid', 'jungle'], - 'hip-hop': ['hip hop', 'hiphop', 'trap', 'boom bap', 'lofi'], - 'ambient': ['ambient', 'chillout', 'downtempo', 'meditation'], - 'edm': ['edm', 'electro', 'big room', 'festival'], - } - - def __init__(self, base_dir: str, cache_dir: Optional[str] = None): - """ - Inicializa el gestor de samples. 
- - Args: - base_dir: Directorio raíz de la librería de samples - cache_dir: Directorio para caché (default: base_dir/.sample_cache) - """ - self.base_dir = Path(base_dir) - self.cache_dir = Path(cache_dir) if cache_dir else self.base_dir / ".sample_cache" - self.cache_dir.mkdir(exist_ok=True) - - self.samples: Dict[str, Sample] = {} - self.index_file = self.cache_dir / "sample_library.json" - self.stats_file = self.cache_dir / "library_stats.json" - - # Analizador de audio - self.analyzer = AudioAnalyzer() if AUDIO_ANALYSIS_AVAILABLE else None - - # Locks para thread-safety - self._lock = threading.RLock() - self._index_dirty = False - - # Estadísticas - self.stats = { - 'total_samples': 0, - 'total_size': 0, - 'by_category': defaultdict(int), - 'by_key': defaultdict(int), - 'by_bpm_range': defaultdict(int), - 'last_scan': None, - } - - # Cargar índice existente - self._load_index() - - def _generate_id(self, file_path: str) -> str: - """Genera un ID único para un sample basado en su ruta""" - return hashlib.md5(file_path.encode()).hexdigest()[:16] - - def _get_file_hash(self, file_path: Path) -> str: - """Calcula hash del archivo para detectar cambios""" - stat = file_path.stat() - return hashlib.md5(f"{stat.st_size}_{stat.st_mtime}".encode()).hexdigest() - - def scan_directory(self, directory: Optional[str] = None, - recursive: bool = True, - analyze_audio: bool = False, - progress_callback: Optional[Callable[[int, int, str], None]] = None) -> Dict[str, Any]: - """ - Escanear un directorio en busca de samples. 
- - Args: - directory: Directorio a escanear (default: base_dir) - recursive: Escanear subdirectorios - analyze_audio: Analizar contenido de audio (más lento) - progress_callback: Función llamada con (procesados, total, archivo_actual) - - Returns: - Estadísticas del escaneo - """ - scan_dir = Path(directory) if directory else self.base_dir - - if not scan_dir.exists(): - raise FileNotFoundError(f"Directorio no encontrado: {scan_dir}") - - logger.info(f"Escaneando: {scan_dir}") - - # Encontrar todos los archivos de audio - if recursive: - audio_files = list(scan_dir.rglob('*')) - else: - audio_files = list(scan_dir.iterdir()) - - audio_files = [f for f in audio_files - if f.is_file() and f.suffix.lower() in self.SUPPORTED_FORMATS] - - total = len(audio_files) - processed = 0 - added = 0 - updated = 0 - errors = 0 - - logger.info(f"Encontrados {total} archivos de audio") - - with self._lock: - for file_path in audio_files: - processed += 1 - - if progress_callback: - progress_callback(processed, total, str(file_path.name)) - - try: - result = self._process_file(file_path, analyze_audio) - if result == 'added': - added += 1 - elif result == 'updated': - updated += 1 - - except Exception as e: - logger.error(f"Error procesando {file_path}: {e}") - errors += 1 - - self._index_dirty = True - self._update_stats() - self._save_index() - - self.stats['last_scan'] = datetime.now().isoformat() - - return { - 'processed': processed, - 'added': added, - 'updated': updated, - 'errors': errors, - 'total_samples': len(self.samples), - } - - def _process_file(self, file_path: Path, analyze_audio: bool) -> str: - """Procesa un archivo individual. 
Retorna 'added', 'updated', o 'unchanged'""" - file_id = self._generate_id(str(file_path)) - self._get_file_hash(file_path) - - # Verificar si ya existe y no ha cambiado - if file_id in self.samples: - existing = self.samples[file_id] - # Comparar hash implícito por fecha de modificación - current_stat = file_path.stat() - if existing.date_modified: - try: - mod_time = datetime.fromisoformat(existing.date_modified).timestamp() - if abs(current_stat.st_mtime - mod_time) < 1: - return 'unchanged' - except Exception: - pass - - # Extraer información del nombre - name = file_path.stem - category, subcategory = self._classify_by_name(name) - sample_type = self._detect_sample_type(name) - key = self._extract_key_from_name(name) - bpm = self._extract_bpm_from_name(name) - genres = self._detect_genres(name) - - # Análisis de audio si está disponible - audio_features = {} - if analyze_audio and self.analyzer: - try: - audio_features = analyze_sample(str(file_path)) - # Usar valores detectados si no están en el nombre - if not bpm and audio_features.get('bpm'): - bpm = audio_features['bpm'] - if not key and audio_features.get('key'): - key = audio_features['key'] - if audio_features.get('sample_type'): - sample_type = audio_features['sample_type'] - if audio_features.get('suggested_genres'): - genres = list(set(genres + audio_features['suggested_genres'])) - except Exception as e: - logger.warning(f"Error analizando audio {file_path}: {e}") - - # Crear o actualizar sample - is_new = file_id not in self.samples - - sample = Sample( - id=file_id, - name=name, - path=str(file_path), - category=category, - subcategory=subcategory, - sample_type=sample_type, - key=key, - bpm=bpm, - duration=audio_features.get('duration', 0.0), - sample_rate=audio_features.get('sample_rate', 44100), - file_size=file_path.stat().st_size, - format=file_path.suffix.lower().lstrip('.'), - genres=genres, - tags=self._extract_tags(name), - analyzed=analyze_audio, - 
spectral_centroid=audio_features.get('spectral_centroid', 0.0), - rms_energy=audio_features.get('rms_energy', 0.0), - is_harmonic=audio_features.get('is_harmonic', False), - is_percussive=audio_features.get('is_percussive', False), - date_modified=datetime.now().isoformat(), - ) - - self.samples[file_id] = sample - return 'added' if is_new else 'updated' - - def _classify_by_name(self, name: str) -> Tuple[str, str]: - """Clasifica un sample por su nombre en categoría y subcategoría""" - name_lower = name.lower() - - for category, subcategories in self.CATEGORIES.items(): - for subcategory, keywords in subcategories.items(): - for keyword in keywords: - if keyword in name_lower: - return category, subcategory - - # Fallback: intentar detectar loops - if 'loop' in name_lower: - return 'loops', 'unknown' - - return 'unknown', 'unknown' - - def _detect_sample_type(self, name: str) -> str: - """Detecta el tipo específico de sample""" - category, subcategory = self._classify_by_name(name) - - if category == 'drums': - return subcategory - elif category == 'bass': - return f"bass_{subcategory}" - elif category == 'synths': - return subcategory - elif category == 'vocals': - return subcategory - elif category == 'loops': - return subcategory - - return 'unknown' - - def _extract_key_from_name(self, name: str) -> Optional[str]: - """Extrae la tonalidad del nombre del archivo""" - import re - - # Patrones comunes - patterns = [ - r'[_\s\-]([A-G][#b]?(?:m|min|minor)?)[_\s\-]', - r'\bin\s+([A-G][#b]?(?:m|min|minor)?)\b', - r'Key[_\s]?([A-G][#b]?(?:m|min|minor)?)', - r'[_\s\-]([A-G][#b]?)\s*(?:maj|major)?[_\s\-]', - ] - - for pattern in patterns: - match = re.search(pattern, name, re.IGNORECASE) - if match: - key = match.group(1) - # Normalizar bemoles a sostenidos - key = key.replace('b', '#').replace('Db', 'C#').replace('Eb', 'D#') - key = key.replace('Gb', 'F#').replace('Ab', 'G#').replace('Bb', 'A#') - - # Detectar modo - is_minor = 'm' in key.lower() or 'min' in 
key.lower() - key = key.replace('min', '').replace('minor', '').replace('major', '') - key = key.rstrip('mM') - - if is_minor: - key = key + 'm' - - return key - - return None - - def _extract_bpm_from_name(self, name: str) -> Optional[float]: - """Extrae el BPM del nombre del archivo""" - import re - - patterns = [ - r'[_\s\-](\d{2,3})\s*BPM', - r'[_\s\-](\d{2,3})[_\s\-]', - r'(\d{2,3})bpm', - ] - - for pattern in patterns: - match = re.search(pattern, name, re.IGNORECASE) - if match: - bpm = int(match.group(1)) - if 60 <= bpm <= 200: - return float(bpm) - - return None - - def _detect_genres(self, name: str) -> List[str]: - """Detecta géneros musicales del nombre""" - name_lower = name.lower() - genres = [] - - for genre, keywords in self.GENRE_KEYWORDS.items(): - for keyword in keywords: - if keyword in name_lower: - genres.append(genre) - break - - return genres - - def _extract_tags(self, name: str) -> List[str]: - """Extrae tags del nombre del archivo""" - import re - - tags = [] - name_lower = name.lower() - - # Palabras comunes como tags - common_tags = [ - 'dry', 'wet', 'processed', 'raw', 'analog', 'digital', - 'vintage', 'modern', 'punchy', 'deep', 'bright', 'dark', - 'tight', 'loose', 'fat', 'thin', 'crisp', 'warm', - 'one shot', 'loop', 'sample', 'hit' - ] - - for tag in common_tags: - if tag in name_lower: - tags.append(tag.replace(' ', '_')) - - # Extraer números como versiones - numbers = re.findall(r'\d+', name) - for num in numbers: - if len(num) <= 2: # Probablemente versión - tags.append(f"v{num}") - - return list(set(tags)) - - def search(self, - query: str = "", - category: str = "", - subcategory: str = "", - sample_type: str = "", - key: str = "", - bpm: Optional[float] = None, - bpm_tolerance: int = 5, - genres: List[str] = None, - tags: List[str] = None, - min_rating: int = 0, - favorites_only: bool = False, - limit: int = 50, - sort_by: str = "name") -> List[Sample]: - """ - Búsqueda avanzada de samples con múltiples filtros. 
- - Args: - query: Búsqueda por nombre - category: Categoría principal - subcategory: Subcategoría - sample_type: Tipo específico - key: Tonalidad musical - bpm: BPM objetivo - bpm_tolerance: Tolerancia de BPM (+/-) - genres: Lista de géneros - tags: Lista de tags - min_rating: Rating mínimo - favorites_only: Solo favoritos - limit: Límite de resultados - sort_by: Campo para ordenar - - Returns: - Lista de samples que coinciden - """ - with self._lock: - results = [] - query_lower = query.lower() - - for sample in self.samples.values(): - # Filtro por query (nombre) - if query and query_lower not in sample.name.lower(): - continue - - # Filtros de categoría - if category and sample.category != category.lower(): - continue - if subcategory and sample.subcategory != subcategory.lower(): - continue - if sample_type and sample.sample_type != sample_type.lower(): - continue - - # Filtro por key - if key: - sample_key = (sample.key or "").lower() - if sample_key != key.lower(): - # Intentar key compatible (mismo root) - if not sample_key.startswith(key.lower().rstrip('m')): - continue - - # Filtro por BPM - if bpm is not None and sample.bpm: - if abs(sample.bpm - bpm) > bpm_tolerance: - continue - - # Filtro por géneros - if genres: - sample_genres = [g.lower() for g in sample.genres] - if not any(g.lower() in sample_genres for g in genres): - continue - - # Filtro por tags - if tags: - sample_tags = [t.lower() for t in sample.tags] - if not any(t.lower() in sample_tags for t in tags): - continue - - # Filtro por rating - if min_rating > 0 and sample.rating < min_rating: - continue - - # Filtro de favoritos - if favorites_only and sample.rating < 4: - continue - - results.append(sample) - - # Ordenar resultados - if sort_by == "name": - results.sort(key=lambda s: s.name.lower()) - elif sort_by == "bpm": - results.sort(key=lambda s: s.bpm or 0) - elif sort_by == "rating": - results.sort(key=lambda s: s.rating, reverse=True) - elif sort_by == "date_added": - 
results.sort(key=lambda s: s.date_added, reverse=True) - - return results[:limit] - - def get_by_id(self, sample_id: str) -> Optional[Sample]: - """Obtiene un sample por su ID""" - with self._lock: - return self.samples.get(sample_id) - - def get_by_path(self, file_path: str) -> Optional[Sample]: - """Obtiene un sample por su ruta""" - sample_id = self._generate_id(file_path) - return self.get_by_id(sample_id) - - def get_random(self, category: str = "", limit: int = 1) -> List[Sample]: - """Obtiene samples aleatorios""" - import random - - with self._lock: - samples = list(self.samples.values()) - - if category: - samples = [s for s in samples if s.category == category] - - if not samples: - return [] - - return random.sample(samples, min(limit, len(samples))) - - def get_pack_for_genre(self, genre: str, key: str = "", - bpm: Optional[float] = None) -> Dict[str, List[Sample]]: - """ - Obtiene un pack completo de samples para un género específico. - - Returns: - Dict con samples organizados por tipo - """ - pack = { - 'kicks': [], - 'snares': [], - 'claps': [], - 'hats': [], - 'percussion': [], - 'bass': [], - 'synths': [], - 'fx': [], - } - - # Buscar samples por tipo - type_mapping = { - 'kicks': ['kick'], - 'snares': ['snare'], - 'claps': ['clap'], - 'hats': ['hat', 'hat_closed', 'hat_open'], - 'percussion': ['perc', 'shaker', 'tom', 'cymbal'], - 'bass': ['bass', 'sub', 'bassline', 'acid'], - 'synths': ['lead', 'pad', 'pluck', 'chord'], - 'fx': ['fx', 'hit', 'noise'], - } - - for pack_category, sample_types in type_mapping.items(): - for sample_type in sample_types: - samples = self.search( - sample_type=sample_type, - key=key, - bpm=bpm, - genres=[genre] if genre else None, - limit=5 - ) - - if samples: - pack[pack_category].extend(samples) - - return pack - - def update_sample(self, sample_id: str, **kwargs) -> bool: - """ - Actualiza metadatos de un sample. 
- - Args: - sample_id: ID del sample - **kwargs: Campos a actualizar - """ - with self._lock: - if sample_id not in self.samples: - return False - - sample = self.samples[sample_id] - - # Campos permitidos para actualización - allowed_fields = { - 'rating', 'tags', 'genres', 'mood', 'energy', - 'key', 'bpm', 'play_count' - } - - for field, value in kwargs.items(): - if field in allowed_fields and hasattr(sample, field): - setattr(sample, field, value) - - sample.date_modified = datetime.now().isoformat() - self._index_dirty = True - - return True - - def rate_sample(self, sample_id: str, rating: int) -> bool: - """Califica un sample (1-5 estrellas)""" - if 0 <= rating <= 5: - return self.update_sample(sample_id, rating=rating) - return False - - def increment_play_count(self, sample_id: str) -> bool: - """Incrementa el contador de reproducciones""" - sample = self.get_by_id(sample_id) - if sample: - return self.update_sample(sample_id, play_count=sample.play_count + 1) - return False - - def delete_sample(self, sample_id: str, delete_file: bool = False) -> bool: - """ - Elimina un sample del índice. 
- - Args: - sample_id: ID del sample - delete_file: Si True, también elimina el archivo físico - """ - with self._lock: - if sample_id not in self.samples: - return False - - sample = self.samples[sample_id] - - if delete_file: - try: - Path(sample.path).unlink() - except Exception as e: - logger.error(f"Error eliminando archivo: {e}") - return False - - del self.samples[sample_id] - self._index_dirty = True - self._update_stats() - - return True - - def refresh(self, analyze_audio: bool = False) -> Dict[str, Any]: - """Refresca el índice completo""" - logger.info("Refrescando índice de samples...") - - # Guardar IDs actuales para detectar eliminados - current_paths = {s.path for s in self.samples.values()} - - # Re-escanear - stats = self.scan_directory(analyze_audio=analyze_audio) - - # Detectar archivos eliminados - new_paths = {s.path for s in self.samples.values()} - removed = current_paths - new_paths - - for path in removed: - sample_id = self._generate_id(path) - if sample_id in self.samples: - del self.samples[sample_id] - stats['removed'] = stats.get('removed', 0) + 1 - - self._save_index() - return stats - - def get_stats(self) -> Dict[str, Any]: - """Obtiene estadísticas de la librería""" - with self._lock: - return { - 'total_samples': len(self.samples), - 'total_size': sum(s.file_size for s in self.samples.values()), - 'by_category': dict(self.stats['by_category']), - 'by_key': dict(self.stats['by_key']), - 'by_bpm_range': dict(self.stats['by_bpm_range']), - 'last_scan': self.stats['last_scan'], - } - - def export_library(self, output_path: str, format: str = "json") -> str: - """ - Exporta la librería a un archivo. 
- - Args: - output_path: Ruta del archivo de salida - format: 'json' o 'csv' - - Returns: - Ruta del archivo exportado - """ - output = Path(output_path) - - with self._lock: - if format == "json": - data = { - 'export_date': datetime.now().isoformat(), - 'stats': self.get_stats(), - 'samples': [s.to_dict() for s in self.samples.values()] - } - with open(output, 'w', encoding='utf-8') as f: - json.dump(data, f, indent=2, ensure_ascii=False) - - elif format == "csv": - import csv - with open(output, 'w', newline='', encoding='utf-8') as f: - if self.samples: - writer = csv.DictWriter(f, fieldnames=self.samples[list(self.samples.keys())[0]].to_dict().keys()) - writer.writeheader() - for sample in self.samples.values(): - writer.writerow(sample.to_dict()) - - return str(output) - - def import_library(self, input_path: str, merge: bool = True) -> Dict[str, int]: - """ - Importa una librería desde un archivo JSON. - - Args: - input_path: Ruta del archivo a importar - merge: Si True, mezcla con la librería existente - - Returns: - Estadísticas de la importación - """ - with open(input_path, 'r', encoding='utf-8') as f: - data = json.load(f) - - imported_samples = data.get('samples', []) - - with self._lock: - if not merge: - self.samples.clear() - - added = 0 - updated = 0 - - for sample_data in imported_samples: - try: - sample = Sample.from_dict(sample_data) - if sample.id in self.samples: - updated += 1 - else: - added += 1 - self.samples[sample.id] = sample - except Exception as e: - logger.error(f"Error importando sample: {e}") - - self._index_dirty = True - self._update_stats() - self._save_index() - - return {'added': added, 'updated': updated} - - def _update_stats(self): - """Actualiza las estadísticas de la librería""" - self.stats['total_samples'] = len(self.samples) - self.stats['total_size'] = sum(s.file_size for s in self.samples.values()) - - # Resetear contadores - self.stats['by_category'] = defaultdict(int) - self.stats['by_key'] = defaultdict(int) - 
self.stats['by_bpm_range'] = defaultdict(int) - - for sample in self.samples.values(): - self.stats['by_category'][sample.category] += 1 - - if sample.key: - self.stats['by_key'][sample.key] += 1 - - if sample.bpm: - if sample.bpm < 100: - self.stats['by_bpm_range']['slow (<100)'] += 1 - elif sample.bpm < 128: - self.stats['by_bpm_range']['mid (100-128)'] += 1 - elif sample.bpm < 140: - self.stats['by_bpm_range']['fast (128-140)'] += 1 - else: - self.stats['by_bpm_range']['very fast (>140)'] += 1 - - def _load_index(self): - """Carga el índice desde disco""" - if not self.index_file.exists(): - logger.info("No existe índice previo, iniciando librería vacía") - return - - try: - with open(self.index_file, 'r', encoding='utf-8') as f: - data = json.load(f) - - for sample_data in data.get('samples', []): - try: - sample = Sample.from_dict(sample_data) - self.samples[sample.id] = sample - except Exception as e: - logger.warning(f"Error cargando sample: {e}") - - self.stats = data.get('stats', self.stats) - logger.info(f"Índice cargado: {len(self.samples)} samples") - - except Exception as e: - logger.error(f"Error cargando índice: {e}") - - def _save_index(self): - """Guarda el índice a disco""" - if not self._index_dirty: - return - - try: - data = { - 'version': 1, - 'saved_at': datetime.now().isoformat(), - 'stats': self.get_stats(), - 'samples': [s.to_dict() for s in self.samples.values()] - } - - # Guardar a archivo temporal primero - temp_file = self.index_file.with_suffix('.tmp') - with open(temp_file, 'w', encoding='utf-8') as f: - json.dump(data, f, indent=2, ensure_ascii=False) - - # Renombrar atómicamente - temp_file.replace(self.index_file) - - self._index_dirty = False - logger.info(f"Índice guardado: {len(self.samples)} samples") - - except Exception as e: - logger.error(f"Error guardando índice: {e}") - - def save(self): - """Fuerza el guardado del índice""" - self._index_dirty = True - self._save_index() - - -# Instancia global -_manager: 
Optional[SampleManager] = None - - -def get_manager(base_dir: Optional[str] = None) -> SampleManager: - """Obtiene la instancia global del gestor""" - global _manager - if _manager is None: - if base_dir is None: - PACKAGE_DIR = Path(__file__).resolve().parent.parent - base_dir = str(Path(r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\librerias\all_tracks")) - _manager = SampleManager(base_dir) - return _manager - - -# Funciones de conveniencia -def scan_samples(directory: str, analyze_audio: bool = False) -> Dict[str, Any]: - """Escanear directorio de samples""" - manager = get_manager(directory) - return manager.scan_directory(analyze_audio=analyze_audio) - - -def find_samples(query: str = "", **kwargs) -> List[Dict[str, Any]]: - """Buscar samples""" - manager = get_manager() - samples = manager.search(query=query, **kwargs) - return [s.to_dict() for s in samples] - - -def get_sample_pack(genre: str, key: str = "", bpm: Optional[float] = None) -> Dict[str, List[Dict]]: - """Obtener pack de samples para un género""" - manager = get_manager() - pack = manager.get_pack_for_genre(genre, key, bpm) - return {k: [s.to_dict() for s in v] for k, v in pack.items()} - - -# Testing -if __name__ == "__main__": - import sys - - logging.basicConfig(level=logging.INFO) - - if len(sys.argv) < 2: - print("Uso: python sample_manager.py [comando]") - print("\nComandos:") - print(" scan - Escanear directorio") - print(" stats - Mostrar estadísticas") - print(" search - Buscar samples") - sys.exit(1) - - directory = sys.argv[1] - command = sys.argv[2] if len(sys.argv) > 2 else "scan" - - manager = SampleManager(directory) - - if command == "scan": - print(f"\nEscaneando: {directory}") - print("=" * 50) - - def progress(current, total, filename): - pct = (current / total) * 100 - print(f"\r[{pct:5.1f}%] {filename[:50]:<50}", end="", flush=True) - - stats = manager.scan_directory(progress_callback=progress) - print("\n") - print(f"Procesados: {stats['processed']}") 
- print(f"Agregados: {stats['added']}") - print(f"Actualizados: {stats['updated']}") - print(f"Errores: {stats['errors']}") - print(f"Total en librería: {stats['total_samples']}") - - elif command == "stats": - stats = manager.get_stats() - print("\nEstadísticas de la librería:") - print("=" * 50) - print(f"Total samples: {stats['total_samples']}") - print(f"Tamaño total: {stats['total_size'] / (1024**2):.1f} MB") - print(f"Último escaneo: {stats['last_scan']}") - print("\nPor categoría:") - for cat, count in sorted(stats['by_category'].items()): - print(f" {cat}: {count}") - print("\nPor key:") - for key, count in sorted(stats['by_key'].items()): - print(f" {key}: {count}") - - elif command == "search": - query = sys.argv[3] if len(sys.argv) > 3 else "" - print(f"\nBuscando: '{query}'") - print("=" * 50) - - results = manager.search(query=query, limit=20) - for s in results: - print(f"\n{s.name}") - print(f" Categoría: {s.category}/{s.subcategory}") - print(f" Key: {s.key or 'N/A'} | BPM: {s.bpm or 'N/A'}") - print(f" Path: {s.path}") diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_system_demo.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_system_demo.py deleted file mode 100644 index 3e70974..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/sample_system_demo.py +++ /dev/null @@ -1,244 +0,0 @@ -""" -Demo del Sistema de Gestión de Samples para AbletonMCP-AI - -Este script demuestra las capacidades del sistema completo de samples. 
-""" - -import sys -from pathlib import Path -sys.path.insert(0, str(Path(__file__).parent)) - -from sample_manager import get_manager -from sample_selector import get_selector -from audio_analyzer import analyze_sample, AudioAnalyzer - - -def demo_analyzer(): - """Demostración del analizador de audio""" - print("=" * 60) - print("DEMO: Audio Analyzer") - print("=" * 60) - - AudioAnalyzer(backend='basic') - - # Analizar un archivo de ejemplo - test_file = r"C:\Users\ren\embeddings\all_tracks\BBH - Primer Impacto - Kick 1.wav" - - print(f"\nAnalizando: {Path(test_file).name}") - print("-" * 40) - - try: - result = analyze_sample(test_file) - - print(f"Tipo detectado: {result['sample_type']}") - print(f"BPM: {result.get('bpm') or 'No detectado'}") - print(f"Key: {result.get('key') or 'No detectado'}") - print(f"Duración: {result['duration']:.3f}s") - print(f"Es percusivo: {result['is_percussive']}") - print(f"Géneros sugeridos: {', '.join(result['suggested_genres'])}") - - except Exception as e: - print(f"Error: {e}") - - print() - - -def demo_manager(): - """Demostración del gestor de samples""" - print("=" * 60) - print("DEMO: Sample Manager") - print("=" * 60) - - manager = get_manager(r"C:\Users\ren\embeddings\all_tracks") - - # Escanear librería - print("\nEscaneando librería...") - stats = manager.scan_directory() - print(f" Samples procesados: {stats['processed']}") - print(f" Nuevos: {stats['added']}") - print(f" Total en librería: {stats['total_samples']}") - - # Estadísticas - print("\nEstadísticas:") - stats = manager.get_stats() - print(f" Total: {stats['total_samples']} samples") - print(f" Tamaño: {stats['total_size'] / (1024**2):.1f} MB") - - if stats['by_category']: - print("\n Por categoría:") - for cat, count in sorted(stats['by_category'].items(), key=lambda x: -x[1]): - print(f" {cat}: {count}") - - if stats['by_key']: - print("\n Por key:") - for key, count in sorted(stats['by_key'].items(), key=lambda x: -x[1]): - print(f" {key}: {count}") - - # 
Búsquedas - print("\nBúsquedas:") - print("-" * 40) - - # Buscar kicks - kicks = manager.search(sample_type="kick", limit=3) - print(f"\nKicks encontrados: {len(kicks)}") - for s in kicks: - print(f" - {s.name}") - - # Buscar por key - g_sharp = manager.search(key="G#m", limit=3) - print(f"\nSamples en G#m: {len(g_sharp)}") - for s in g_sharp: - print(f" - {s.name} ({s.sample_type})") - - # Buscar por BPM - bpm_128 = manager.search(bpm=128, bpm_tolerance=5, limit=3) - print(f"\nSamples ~128 BPM: {len(bpm_128)}") - for s in bpm_128: - key_info = f" [{s.key}]" if s.key else "" - print(f" - {s.name}{key_info}") - - print() - - -def demo_selector(): - """Demostración del selector inteligente""" - print("=" * 60) - print("DEMO: Sample Selector") - print("=" * 60) - - selector = get_selector() - - # Seleccionar para diferentes géneros - genres = ['techno', 'house', 'tech-house'] - - for genre in genres: - print(f"\n{genre.upper()}:") - print("-" * 40) - - group = selector.select_for_genre(genre, key='Am', bpm=128) - - print(f" Key: {group.key} | BPM: {group.bpm}") - - # Drum kit - kit = group.drums - print("\n Drum Kit:") - if kit.kick: - print(f" Kick: {kit.kick.name}") - if kit.snare: - print(f" Snare: {kit.snare.name}") - if kit.clap: - print(f" Clap: {kit.clap.name}") - if kit.hat_closed: - print(f" Hat: {kit.hat_closed.name}") - - # Mapeo MIDI - mapping = selector.get_midi_mapping_for_kit(kit) - print("\n Mapeo MIDI:") - for note, info in sorted(mapping['notes'].items())[:4]: - if info['sample']: - print(f" Note {note}: {info['sample'][:40]}...") - - # Bass - if group.bass: - print(f"\n Bass ({len(group.bass)}):") - for s in group.bass[:2]: - key_info = f" [{s.key}]" if s.key else "" - print(f" - {s.name}{key_info}") - - # Cambio de key - print("\n" + "-" * 40) - print("Cambios de Key Sugeridos (desde Am):") - changes = ['fifth_up', 'fifth_down', 'relative', 'parallel'] - for change in changes: - new_key = selector.suggest_key_change('Am', change) - print(f" 
{change}: {new_key}") - - print() - - -def demo_compatibility(): - """Demostración de búsqueda de samples compatibles""" - print("=" * 60) - print("DEMO: Compatibilidad de Samples") - print("=" * 60) - - manager = get_manager() - selector = get_selector() - - # Encontrar un sample con key para usar de referencia - samples_with_key = manager.search(key="G#m", limit=1) - - if samples_with_key: - reference = samples_with_key[0] - print(f"\nSample de referencia: {reference.name}") - print(f" Key: {reference.key} | BPM: {reference.bpm}") - - # Buscar compatibles - compatible = selector.find_compatible_samples(reference, max_results=5) - - print("\nSamples compatibles:") - print("-" * 40) - - for sample, score in compatible: - bar_len = int(score * 20) - bar = "█" * bar_len + "░" * (20 - bar_len) - print(f" [{bar}] {score:.1%} - {sample.name}") - - print() - - -def demo_pack_generation(): - """Demostración de generación de packs""" - print("=" * 60) - print("DEMO: Generación de Sample Packs") - print("=" * 60) - - manager = get_manager() - - genres = ['techno', 'house', 'deep-house'] - - for genre in genres: - print(f"\n{genre.upper()} Pack:") - print("-" * 40) - - pack = manager.get_pack_for_genre(genre, key='Am', bpm=128) - - total = 0 - for category, samples in pack.items(): - if samples: - count = len(samples) - total += count - print(f" {category}: {count}") - - print(f" Total: {total} samples") - - print() - - -def main(): - """Ejecutar todas las demos""" - print("\n") - print("=" * 60) - print(" AbletonMCP-AI Sample System Demo ".center(60)) - print("=" * 60) - print() - - try: - demo_analyzer() - demo_manager() - demo_selector() - demo_compatibility() - demo_pack_generation() - - print("=" * 60) - print("Todas las demos completadas exitosamente!") - print("=" * 60) - - except Exception as e: - print(f"\nError en demo: {e}") - import traceback - traceback.print_exc() - - -if __name__ == "__main__": - main() diff --git 
a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/segment_rag_builder.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/segment_rag_builder.py deleted file mode 100644 index 3f97f68..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/segment_rag_builder.py +++ /dev/null @@ -1,198 +0,0 @@ -""" -segment_rag_builder.py - Build or refresh the persistent segment-audio index. -""" - -from __future__ import annotations - -import argparse -import json -import logging -from pathlib import Path - -from reference_listener import ReferenceAudioListener, export_segment_rag_manifest, generate_segment_rag_summary, _get_segment_rag_status, _backfill_segment_cache_metadata - - -logger = logging.getLogger(__name__) - - -def _default_library_dir() -> Path: - return Path(__file__).resolve().parents[2] / "librerias" / "all_tracks" - - -def main() -> int: - parser = argparse.ArgumentParser(description="Build the persistent segment-audio retrieval cache.") - parser.add_argument("--library-dir", default=str(_default_library_dir()), help="Audio library directory") - parser.add_argument("--roles", nargs="*", default=None, help="Subset of roles to index") - parser.add_argument("--max-files", type=int, default=None, help="Optional limit for targeted files") - parser.add_argument("--duration-limit", type=float, default=24.0, help="Max seconds per file during indexing") - parser.add_argument("--force", action="store_true", help="Rebuild even if persistent segment cache already exists") - parser.add_argument("--json", action="store_true", help="Emit full JSON report") - parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose output") - parser.add_argument("--offset", type=int, default=0, help="Skip first N files before starting (for chunked indexing)") - parser.add_argument("--batch-size", type=int, default=None, help="Process exactly N files then stop (for chunked indexing)") - parser.add_argument("--output-manifest", type=str, default=None, help="Path to save full 
manifest JSON") - parser.add_argument("--output-summary", type=str, default=None, help="Path to save summary report") - parser.add_argument("--resume", action="store_true", help="Resume from previous run state") - parser.add_argument("--export-manifest", type=str, default=None, - help="Export candidate manifest to FILE (format: .json or .md)") - parser.add_argument("--export-format", type=str, default="json", - choices=['json', 'markdown'], help="Manifest export format") - parser.add_argument("--status", action="store_true", help="Show current index status without building") - parser.add_argument("--backfill-metadata", action="store_true", help="Backfill metadata into existing cache files from indexing state") - parser.add_argument("--force-backfill", action="store_true", help="Force backfill even for files that already have metadata") - args = parser.parse_args() - - # Configure logging based on verbose flag - if args.verbose: - logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') - else: - logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') - - # Handle --status flag for early exit - if args.status: - status = _get_segment_rag_status(Path(args.library_dir)) - - if args.json: - print(json.dumps(status, indent=2, default=str)) - else: - print("=" * 60) - print("SEGMENT RAG INDEX STATUS") - print("=" * 60) - print(f"Cache Directory: {status['cache_dir']}") - print(f"Cache Files: {status['cache_files']}") - print(f"Total Indexed Segments: {status['total_segments']}") - print(f"Status: {status.get('status', 'unknown')}") - - if status.get('role_coverage'): - print("\nRole Coverage:") - for role, count in sorted(status['role_coverage'].items()): - print(f" {role}: {count} segments") - - if status.get('newest_entries'): - print(f"\nNewest Entries: {len(status['newest_entries'])} files") - for entry in status['newest_entries'][:5]: - print(f" - {entry['file_name']} ({entry['segments']} segments)") - - if 
status.get('oldest_entries'): - print(f"\nOldest Entries: {len(status['oldest_entries'])} files") - for entry in status['oldest_entries'][:5]: - print(f" - {entry['file_name']} ({entry['segments']} segments)") - - return 0 - - # Handle --backfill-metadata flag for early exit - if args.backfill_metadata: - result = _backfill_segment_cache_metadata(Path(args.library_dir), force=args.force_backfill) - - if args.json: - print(json.dumps(result, indent=2, default=str)) - else: - print("=" * 60) - print("SEGMENT CACHE METADATA BACKFILL") - print("=" * 60) - print(f"Cache Directory: {result['cache_dir']}") - print(f"Cache Files: {result['cache_files']}") - print(f"Backfilled: {result['backfilled']}") - print(f"Skipped: {result['skipped']}") - print(f"Errors: {result['errors']}") - print(f"Status: {result.get('status', 'unknown')}") - - return 0 - - listener = ReferenceAudioListener(args.library_dir) - report = listener.build_segment_rag_index( - roles=args.roles, - max_files=args.max_files, - duration_limit=args.duration_limit, - force=args.force, - offset=args.offset, - batch_size=args.batch_size, - resume=args.resume, - ) - - # Generate enhanced summary - summary = generate_segment_rag_summary(report, Path(args.library_dir)) - - if args.json: - print(json.dumps(summary, indent=2, default=str)) - else: - # Enhanced text output - print("=" * 60) - print("SEGMENT RAG INDEX COMPLETE") - print("=" * 60) - print(f"Device: {summary['device']}") - print(f"Cache: {summary['segment_index_dir']}") - print() - print(f"Files: {summary['files_targeted']} targeted") - print(f" Built: {summary['built']}") - print(f" Reused: {summary['reused']}") - print(f" Skipped: {summary['skipped']}") - print(f" Errors: {summary['errors']}") - print() - print(f"Total Segments: {summary['total_segments']}") - - if 'summary_stats' in summary: - stats = summary['summary_stats'] - print(f" Avg per file: {stats['avg_segments_per_file']:.1f}") - print(f" Range: {stats['min_segments']} - 
{stats['max_segments']}") - - if 'role_coverage' in summary: - print("\nRole Coverage:") - for role in sorted(summary['role_coverage'].keys()): - print(f" {role}: {summary['role_coverage'][role]} segments") - - if 'cache_info' in summary: - info = summary['cache_info'] - print(f"\nCache Size: {info['cache_size_mb']} MB") - - if args.offset > 0: - print(f"\nOffset: {args.offset}") - if args.batch_size is not None: - print(f"Batch Size: {args.batch_size}") - print(f"Files Remaining: {summary.get('files_remaining', 'unknown')}") - - # Save manifest if requested - if args.output_manifest: - manifest_path = Path(args.output_manifest) - manifest_path.parent.mkdir(parents=True, exist_ok=True) - with open(manifest_path, 'w') as f: - json.dump({ - "report": report, - "full_manifest": report.get("manifest", []), - }, f, indent=2) - if not args.json: - print(f"\nManifest saved to: {manifest_path}") - - # Save summary if requested - if args.output_summary: - summary_path = Path(args.output_summary) - summary_path.parent.mkdir(parents=True, exist_ok=True) - with open(summary_path, 'w') as f: - json.dump(summary, f, indent=2, default=str) - if not args.json: - print(f"Summary saved to: {summary_path}") - - # Export manifest in requested format - if args.export_manifest: - manifest_path = Path(args.export_manifest) - export_format = args.export_format - - # Determine format from extension if not specified - if not args.export_format or args.export_format == "json": - if manifest_path.suffix == '.md': - export_format = 'markdown' - else: - export_format = 'json' - - export_segment_rag_manifest( - report.get('manifest', []), - manifest_path, - format=export_format - ) - print(f"Manifest exported to: {manifest_path}") - - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/server_v2.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/server_v2.py deleted file mode 100644 index 6c152db..0000000 --- 
a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/server_v2.py +++ /dev/null @@ -1,1366 +0,0 @@ -""" -AbletonMCP AI Server v2 - Servidor MCP robusto para generación musical -Integra FastMCP con Ableton Live 12 via socket TCP y Max for Live via UDP - -Para ejecutar: - python -m AbletonMCP_AI.MCP_Server.server_v2 - -O con uv: - uv run python -m AbletonMCP_AI.MCP_Server.server_v2 -""" - -from mcp.server.fastmcp import FastMCP, Context -import socket -import json -import logging -import sys -from dataclasses import dataclass -from contextlib import asynccontextmanager -from typing import AsyncIterator, Dict, Any, List, Optional -from pathlib import Path -from datetime import datetime - -# Añadir el path para imports -sys.path.insert(0, str(Path(__file__).parent.parent)) - -try: -# from song_generator import SongGenerator, StyleConfig - from sample_index import SampleIndex -except ImportError as e: - print(f"Error importando módulos locales: {e}") - SongGenerator = None - SampleIndex = None - -# Configuración de logging -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', - handlers=[ - logging.StreamHandler(), - logging.FileHandler(Path(__file__).parent / 'server_v2.log', mode='a') - ] -) -logger = logging.getLogger("AbletonMCP-AI-v2") - -# ============================================================================ -# CONSTANTES Y CONFIGURACIÓN -# ============================================================================ - -DEFAULT_ABLETON_PORT = 9877 -DEFAULT_MAX_PORT = 9879 -MAX_HOST = "127.0.0.1" -ABLETON_HOST = "localhost" -SAMPLES_DIR = r"C:\Users\ren\embeddings\all_tracks" - -# Colores por tipo de track -TRACK_COLORS = { - 'kick': 10, # Rojo - 'snare': 20, # Verde - 'hat': 5, # Amarillo - 'clap': 45, # Naranja - 'bass': 30, # Azul - 'synth': 50, # Rosa/Magenta - 'chords': 60, # Púrpura - 'fx': 25, # Verde claro - 'vocal': 15, # Naranja oscuro -} - -# Instrucciones para el productor (contexto de IA) 
-PRODUCER_INSTRUCTIONS = """ -Eres AbletonMCP-AI v2, un productor musical experto integrado con Ableton Live 12 y Max for Live. -Tu objetivo es crear música electrónica profesional mediante prompts en lenguaje natural. - -CAPACIDADES PRINCIPALES: -1. Generar tracks completos con estructura profesional (Intro, Build, Drop, Break, Outro) -2. Crear patrones MIDI para diferentes géneros (Techno, House, Trance, Tech-House, etc.) -3. Seleccionar y cargar samples apropiados desde la librería local -4. Enviar rutas de samples a Max for Live para carga dinámica -5. Configurar BPM, tonalidad y estructura musical -6. Controlar transporte (play, stop, tempo) -7. Crear clips y escenas en Ableton - -HERRAMIENTAS DISPONIBLES: -- generate_song(genre, style, bpm): Genera una canción completa -- load_sample_kit(genre): Carga un kit de samples para un género -- create_pattern(instrument, pattern_type): Crea patrones MIDI -- control_transport(action): Controla reproducción -- get_session_info(): Obtiene información de la sesión - -ESTILOS SOPORTADOS: -- Techno: Industrial, Peak Time, Dub, Minimal, Acid -- House: Deep, Tech-House, Progressive, Afro, Classic 90s -- Trance: Psy, Progressive, Uplifting -- Drum & Bass: Liquid, Neuro, Jump-up, Jungle - -FLUJO DE TRABAJO: -1. Analizar el prompt del usuario para extraer género, BPM, tonalidad, mood -2. Detectar samples disponibles en la librería -3. Generar patrones MIDI característicos del género -4. Enviar comandos a Ableton via socket TCP -5. Enviar rutas de samples a Max via UDP -6. 
Proporcionar feedback sobre lo creado - -REGLAS: -- Siempre verifica la conexión con Ableton antes de ejecutar comandos -- Usa valores por defecto razonables si el usuario no especifica -- Organiza los tracks con colores consistentes -- Maneja errores gracefully y proporciona mensajes útiles -- Loggea todas las operaciones para debugging -""".strip() - - -# ============================================================================ -# CLASES DE CONEXIÓN -# ============================================================================ - -@dataclass -class AbletonConnection: - """Gestiona la conexión TCP con Ableton Live""" - host: str = ABLETON_HOST - port: int = DEFAULT_ABLETON_PORT - sock: Optional[socket.socket] = None - connected: bool = False - last_error: Optional[str] = None - - def connect(self, timeout: float = 5.0) -> bool: - """Conecta al Remote Script de Ableton""" - if self.connected and self.sock: - return True - - try: - self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.settimeout(timeout) - self.sock.connect((self.host, self.port)) - self.sock.settimeout(None) # Non-blocking después de conectar - self.connected = True - self.last_error = None - logger.info(f"Conectado a Ableton en {self.host}:{self.port}") - return True - except socket.timeout: - self.last_error = f"Timeout conectando a {self.host}:{self.port}" - logger.error(self.last_error) - self.sock = None - self.connected = False - return False - except Exception as e: - self.last_error = f"Error conectando a Ableton: {e}" - logger.error(self.last_error) - self.sock = None - self.connected = False - return False - - def disconnect(self): - """Desconecta de Ableton""" - if self.sock: - try: - self.sock.close() - except Exception as e: - logger.error(f"Error desconectando: {e}") - finally: - self.sock = None - self.connected = False - logger.info("Desconectado de Ableton") - - def send_command(self, command_type: str, params: Dict[str, Any] = None, - timeout: float = 15.0) 
-> Dict[str, Any]: - """Envía un comando a Ableton y retorna la respuesta""" - if not self.connected and not self.connect(): - return {"status": "error", "message": "No conectado a Ableton"} - - command = { - "type": command_type, - "params": params or {} - } - - try: - logger.debug(f"Enviando comando: {command_type}") - self.sock.sendall(json.dumps(command).encode('utf-8')) - - # Recibir respuesta - self.sock.settimeout(timeout) - chunks = [] - - while True: - try: - chunk = self.sock.recv(8192) - if not chunk: - break - chunks.append(chunk) - - # Intentar parsear JSON completo - try: - data = b''.join(chunks) - response = json.loads(data.decode('utf-8')) - return response - except json.JSONDecodeError: - continue - - except socket.timeout: - logger.warning("Timeout esperando respuesta") - break - - # Respuesta incompleta - if chunks: - data = b''.join(chunks) - try: - return json.loads(data.decode('utf-8')) - except Exception: - return {"status": "error", "message": "Respuesta JSON incompleta"} - else: - return {"status": "error", "message": "No se recibió respuesta"} - - except socket.error as e: - self.connected = False - self.last_error = f"Error de socket: {e}" - logger.error(self.last_error) - return {"status": "error", "message": str(e)} - except Exception as e: - self.connected = False - self.last_error = f"Error en comunicación: {e}" - logger.error(self.last_error) - return {"status": "error", "message": str(e)} - - -@dataclass -class MaxConnection: - """Gestiona la conexión UDP con Max for Live""" - host: str = MAX_HOST - port: int = DEFAULT_MAX_PORT - sock: Optional[socket.socket] = None - - def __post_init__(self): - self._init_socket() - - def _init_socket(self): - """Inicializa el socket UDP""" - try: - self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - logger.info(f"Socket UDP inicializado para Max en {self.host}:{self.port}") - except Exception as e: - logger.error(f"Error inicializando socket UDP: {e}") - self.sock = None - - def 
send_message(self, message: Dict[str, Any]) -> bool: - """Envía un mensaje JSON a Max for Live via UDP""" - if not self.sock: - self._init_socket() - if not self.sock: - return False - - try: - data = json.dumps(message).encode('utf-8') - self.sock.sendto(data, (self.host, self.port)) - logger.debug(f"Mensaje enviado a Max: {message.get('type', 'unknown')}") - return True - except Exception as e: - logger.error(f"Error enviando mensaje a Max: {e}") - return False - - def send_sample_path(self, track_index: int, sample_path: str, - slot: int = 0) -> bool: - """Envía una ruta de sample a Max para cargar""" - message = { - "type": "load_sample", - "track_index": track_index, - "sample_path": sample_path, - "slot": slot, - "timestamp": datetime.now().isoformat() - } - return self.send_message(message) - - def send_sample_kit(self, kit: Dict[str, List[Dict]]) -> bool: - """Envía un kit completo de samples a Max""" - message = { - "type": "load_sample_kit", - "kit": kit, - "timestamp": datetime.now().isoformat() - } - return self.send_message(message) - - def send_command(self, command: str, params: Dict[str, Any] = None) -> bool: - """Envía un comando genérico a Max""" - message = { - "type": "command", - "command": command, - "params": params or {}, - "timestamp": datetime.now().isoformat() - } - return self.send_message(message) - - -# ============================================================================ -# GESTORES GLOBALES -# ============================================================================ - -_ableton_connection: Optional[AbletonConnection] = None -_max_connection: Optional[MaxConnection] = None -_sample_index: Optional['SampleIndex'] = None -_song_generator: Optional['SongGenerator'] = None - - -def get_ableton_connection() -> AbletonConnection: - """Obtiene o crea la conexión con Ableton""" - global _ableton_connection - if _ableton_connection is None: - _ableton_connection = AbletonConnection() - return _ableton_connection - - -def 
get_max_connection() -> MaxConnection: - """Obtiene o crea la conexión con Max""" - global _max_connection - if _max_connection is None: - _max_connection = MaxConnection() - return _max_connection - - -def get_sample_index() -> Optional['SampleIndex']: - """Obtiene o crea el índice de samples""" - global _sample_index - if _sample_index is None and SampleIndex is not None: - try: - _sample_index = SampleIndex(SAMPLES_DIR) - except Exception as e: - logger.error(f"Error cargando índice de samples: {e}") - return _sample_index - - -def get_song_generator() -> Optional['SongGenerator']: - """Obtiene o crea el generador de canciones""" - global _song_generator - if _song_generator is None and SongGenerator is not None: - _song_generator = SongGenerator() - return _song_generator - - -# ============================================================================ -# LIFESPAN DEL SERVIDOR -# ============================================================================ - -@asynccontextmanager -async def server_lifespan(server: FastMCP) -> AsyncIterator[Dict[str, Any]]: - """Maneja el ciclo de vida del servidor""" - try: - logger.info("=" * 60) - logger.info("AbletonMCP-AI Server v2 iniciando...") - logger.info("=" * 60) - - # Intentar conectar a Ableton - try: - ableton = get_ableton_connection() - if ableton.connect(): - logger.info("Conectado a Ableton Live") - else: - logger.warning("No se pudo conectar a Ableton (¿está abierto el script?)") - except Exception as e: - logger.warning(f"Error conectando a Ableton: {e}") - - # Inicializar conexión con Max - try: - get_max_connection() - logger.info(f"Conexión UDP con Max lista en puerto {DEFAULT_MAX_PORT}") - except Exception as e: - logger.warning(f"Error inicializando conexión con Max: {e}") - - # Inicializar índice de samples - try: - sample_index = get_sample_index() - if sample_index: - logger.info(f"Índice de samples cargado: {len(sample_index.samples)} samples") - else: - logger.warning("Índice de samples no 
disponible") - except Exception as e: - logger.warning(f"Error cargando índice de samples: {e}") - - # Inicializar generador de canciones - try: - song_gen = get_song_generator() - if song_gen: - logger.info("Generador de canciones listo") - else: - logger.warning("Generador de canciones no disponible") - except Exception as e: - logger.warning(f"Error inicializando generador: {e}") - - yield { - "ableton": _ableton_connection, - "max": _max_connection, - "samples": _sample_index, - "generator": _song_generator - } - - finally: - global _ableton_connection, _max_connection - if _ableton_connection: - logger.info("Desconectando de Ableton...") - _ableton_connection.disconnect() - if _max_connection and _max_connection.sock: - logger.info("Cerrando socket UDP...") - _max_connection.sock.close() - logger.info("AbletonMCP-AI Server v2 detenido") - - -# ============================================================================ -# CREAR SERVIDOR MCP -# ============================================================================ - -mcp = FastMCP( - "AbletonMCP-AI-v2", - instructions=PRODUCER_INSTRUCTIONS, - lifespan=server_lifespan -) - - -# ============================================================================ -# HERRAMIENTAS MCP - GENERACIÓN DE CANCIONES -# ============================================================================ - -@mcp.tool() -def generate_song( - ctx: Context, - genre: str = "house", - style: str = "", - bpm: float = 0, - key: str = "", - structure: str = "standard" -) -> str: - """ - Genera una canción completa con estructura profesional - - Args: - genre: Género musical (techno, house, trance, tech-house, drum-and-bass) - style: Sub-género o estilo específico (e.g., "industrial", "deep", "90s", "minimal") - bpm: BPM deseado (0 = auto-seleccionar según género) - key: Tonalidad (e.g., "Am", "F#m", "C") - vacío = auto-seleccionar - structure: Estructura del track (standard, minimal, extended) - - Returns: - Resumen de la canción generada - 
- Ejemplos: - generate_song("techno", "industrial", 138, "F#m") - generate_song("house", "deep", 124, "Am") - generate_song("tech-house", "groovy", 126) - """ - try: - generator = get_song_generator() - if not generator: - return "Error: Generador de canciones no disponible" - - ableton = get_ableton_connection() - if not ableton.connect(): - return f"Error: No se pudo conectar a Ableton en {ABLETON_HOST}:{DEFAULT_ABLETON_PORT}" - - # Generar configuración - config = generator.generate_config(genre, style, bpm, key, structure) - - # Enviar comando a Ableton - response = ableton.send_command("generate_complete_song", { - "genre": genre, - "style": style or config.get('style', ''), - "bpm": config.get('bpm', 120), - "key": config.get('key', ''), - "structure": structure - }) - - if response.get("status") == "success": - summary = config.get("summary", "") - return f"Canción generada exitosamente!\n{summary}" - else: - return f"Error generando canción: {response.get('message', 'Error desconocido')}" - - except Exception as e: - logger.exception("Error en generate_song") - return f"Error: {str(e)}" - - -@mcp.tool() -def load_sample_kit( - ctx: Context, - genre: str = "techno", - key: str = "", - bpm: int = 0 -) -> str: - """ - Carga un kit de samples completo para un género específico - - Args: - genre: Género musical para seleccionar samples apropiados - key: Tonalidad preferida para samples armónicos - bpm: BPM preferido para samples con tempo específico - - Returns: - Lista de samples cargados - """ - try: - sample_index = get_sample_index() - if not sample_index: - return "Error: Índice de samples no disponible" - - max_conn = get_max_connection() - - # Obtener pack de samples - kit = sample_index.get_sample_pack(genre, key, bpm) - - # Contar samples encontrados - total_samples = sum(len(samples) for samples in kit.values()) - - if total_samples == 0: - return f"No se encontraron samples para el género '{genre}'" - - # Enviar a Max - if 
max_conn.send_sample_kit(kit): - # Construir resumen - lines = [f"Kit de samples para {genre} cargado:", ""] - for category, samples in kit.items(): - if samples: - lines.append(f"{category.upper()}:") - for s in samples[:2]: # Mostrar máximo 2 por categoría - lines.append(f" - {s['name']}") - if len(samples) > 2: - lines.append(f" ... y {len(samples)-2} más") - lines.append("") - lines.append(f"Total: {total_samples} samples enviados a Max") - return "\n".join(lines) - else: - return "Error enviando kit a Max for Live" - - except Exception as e: - logger.exception("Error en load_sample_kit") - return f"Error: {str(e)}" - - -@mcp.tool() -def create_pattern( - ctx: Context, - instrument: str, - pattern_type: str = "standard", - track_index: int = -1, - clip_index: int = 0, - length: float = 4.0, - key: str = "Am", - genre: str = "techno" -) -> str: - """ - Crea un patrón MIDI para un instrumento específico - - Args: - instrument: Tipo de instrumento (kick, snare, hat, clap, bass, chords, lead, melody) - pattern_type: Tipo de patrón (standard, minimal, full, complex, simple) - track_index: Índice del track (-1 = crear nuevo) - clip_index: Índice del clip/slot - length: Duración en beats - key: Tonalidad para instrumentos melódicos - genre: Género para estilo del patrón - - Returns: - Confirmación del patrón creado - """ - try: - generator = get_song_generator() - if not generator: - return "Error: Generador no disponible" - - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - # Crear track si es necesario - if track_index < 0: - response = ableton.send_command("create_midi_track", {"index": -1}) - if response.get("status") == "success": - track_index = response.get("result", {}).get("index", 0) - else: - return "Error creando track MIDI" - - # Crear clip - clip_response = ableton.send_command("create_clip", { - "track_index": track_index, - "clip_index": clip_index, - "length": length - }) - - if 
clip_response.get("status") != "success": - return f"Error creando clip: {clip_response.get('message')}" - - # Generar notas según instrumento - notes = [] - color = TRACK_COLORS.get(instrument.lower(), 0) - - if instrument.lower() in ['kick', 'bd', 'bass drum']: - notes = generator._create_kick_pattern(genre, pattern_type) - elif instrument.lower() in ['snare', 'sd', 'clap']: - notes = generator._create_clap_pattern(genre, pattern_type) - elif instrument.lower() in ['hat', 'hihat', 'hh']: - notes = generator._create_hat_pattern(genre, pattern_type) - elif instrument.lower() in ['perc', 'percussion']: - notes = generator._create_perc_pattern(genre, pattern_type) - elif instrument.lower() == 'bass': - notes = generator.create_bassline(key, pattern_type, length) - elif instrument.lower() in ['chords', 'chord', 'pads']: - notes = generator.create_chord_progression(key, genre, length) - elif instrument.lower() in ['lead', 'melody', 'synth']: - notes = generator.create_melody(key, 'minor', length, genre) - else: - return f"Instrumento '{instrument}' no reconocido" - - # Aplicar color al track - if color: - ableton.send_command("set_track_color", { - "track_index": track_index, - "color": color - }) - - # Agregar notas - notes_response = ableton.send_command("add_notes_to_clip", { - "track_index": track_index, - "clip_index": clip_index, - "notes": notes - }) - - if notes_response.get("status") == "success": - return f"Patrón '{pattern_type}' para {instrument} creado en track {track_index}, clip {clip_index} ({len(notes)} notas)" - else: - return f"Error agregando notas: {notes_response.get('message')}" - - except Exception as e: - logger.exception("Error en create_pattern") - return f"Error: {str(e)}" - - -@mcp.tool() -def control_transport( - ctx: Context, - action: str, - tempo: float = None -) -> str: - """ - Controla el transporte de Ableton (play, stop, tempo) - - Args: - action: Acción a ejecutar (play, stop, continue, toggle, set_tempo) - tempo: BPM a establecer 
(solo para action='set_tempo') - - Returns: - Confirmación de la acción - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - action = action.lower() - - if action == "play": - response = ableton.send_command("start_playback") - if response.get("status") == "success": - return "Reproducción iniciada" - elif action == "stop": - response = ableton.send_command("stop_playback") - if response.get("status") == "success": - return "Reproducción detenida" - elif action == "continue": - response = ableton.send_command("continue_playback") - if response.get("status") == "success": - return "Reproducción continuada" - elif action in ["set_tempo", "tempo", "bpm"]: - if tempo is None or tempo <= 0: - return "Error: Debes especificar un tempo válido" - response = ableton.send_command("set_tempo", {"tempo": tempo}) - if response.get("status") == "success": - return f"Tempo establecido a {tempo} BPM" - elif action == "get_tempo": - response = ableton.send_command("get_session_info") - if response.get("status") == "success": - return f"Tempo actual: {response.get('result', {}).get('tempo', 'desconocido')} BPM" - else: - return f"Acción '{action}' no reconocida. 
Usa: play, stop, continue, set_tempo" - - return f"Error: {response.get('message', 'Error desconocido')}" - - except Exception as e: - logger.exception("Error en control_transport") - return f"Error: {str(e)}" - - -@mcp.tool() -def get_session_info(ctx: Context) -> str: - """ - Obtiene información completa de la sesión actual de Ableton - - Returns: - JSON con información de la sesión (tempo, tracks, estado de reproducción) - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return f"Error: No conectado a Ableton en {ABLETON_HOST}:{DEFAULT_ABLETON_PORT}" - - response = ableton.send_command("get_session_info") - - if response.get("status") == "success": - result = response.get("result", {}) - info_lines = [ - "Información de la sesión:", - f" Tempo: {result.get('tempo', 'N/A')} BPM", - f" Reproduciendo: {'Sí' if result.get('is_playing') else 'No'}", - f" Tracks: {result.get('num_tracks', 'N/A')}", - ] - if 'current_song_time' in result: - info_lines.append(f" Tiempo: {result.get('current_song_time')} beats") - return "\n".join(info_lines) - else: - return f"Error: {response.get('message', 'Error desconocido')}" - - except Exception as e: - logger.exception("Error en get_session_info") - return f"Error: {str(e)}" - - -# ============================================================================ -# HERRAMIENTAS MCP - GESTIÓN DE SAMPLES -# ============================================================================ - -@mcp.tool() -def search_samples( - ctx: Context, - query: str, - category: str = "", - limit: int = 10 -) -> str: - """ - Busca samples en la librería local - - Args: - query: Término de búsqueda (e.g., "kick", "bass", "hat") - category: Categoría (kick, snare, hat, bass, synth, percussion, vocal) - limit: Número máximo de resultados - - Returns: - Lista de samples encontrados - """ - try: - sample_index = get_sample_index() - if not sample_index: - return "Error: Índice de samples no disponible" - - results = 
sample_index.search(query, category, limit) - - if not results: - return f"No se encontraron samples para '{query}'" - - output = [f"Samples encontrados para '{query}':\n"] - for i, sample in enumerate(results, 1): - output.append(f"{i}. {sample['name']} ({sample['category']})") - output.append(f" Path: {sample['path']}") - if sample.get('key'): - output.append(f" Key: {sample['key']}, BPM: {sample.get('bpm', 'N/A')}") - output.append("") - - return "\n".join(output) - - except Exception as e: - logger.exception("Error en search_samples") - return f"Error: {str(e)}" - - -@mcp.tool() -def get_random_sample( - ctx: Context, - category: str = "" -) -> str: - """ - Obtiene un sample aleatorio de la librería - - Args: - category: Categoría opcional para filtrar - - Returns: - Información del sample seleccionado - """ - try: - sample_index = get_sample_index() - if not sample_index: - return "Error: Índice de samples no disponible" - - sample = sample_index.get_random_sample(category) - - if not sample: - return f"No hay samples disponibles{' en categoría ' + category if category else ''}" - - return f"""Sample aleatorio seleccionado: -Nombre: {sample['name']} -Categoría: {sample['category']} -Path: {sample['path']} -Key: {sample.get('key', 'N/A')} -BPM: {sample.get('bpm', 'N/A')}""" - - except Exception as e: - logger.exception("Error en get_random_sample") - return f"Error: {str(e)}" - - -@mcp.tool() -def send_sample_to_max( - ctx: Context, - sample_path: str, - track_index: int = 0, - slot: int = 0 -) -> str: - """ - Envía una ruta de sample a Max for Live para cargar - - Args: - sample_path: Ruta completa del archivo de audio - track_index: Índice del track donde cargar - slot: Slot/clip donde cargar el sample - - Returns: - Confirmación del envío - """ - try: - max_conn = get_max_connection() - - if max_conn.send_sample_path(track_index, sample_path, slot): - return f"Sample enviado a Max: {Path(sample_path).name} -> Track {track_index}, Slot {slot}" - else: - 
return "Error enviando sample a Max" - - except Exception as e: - logger.exception("Error en send_sample_to_max") - return f"Error: {str(e)}" - - -@mcp.tool() -def refresh_sample_index(ctx: Context) -> str: - """ - Refresca el índice de samples escaneando el directorio nuevamente - - Returns: - Confirmación con el número de samples encontrados - """ - try: - global _sample_index - if SampleIndex is None: - return "Error: Módulo SampleIndex no disponible" - - _sample_index = SampleIndex(SAMPLES_DIR) - _sample_index.refresh() - - return f"Índice refrescado: {len(_sample_index.samples)} samples encontrados" - - except Exception as e: - logger.exception("Error en refresh_sample_index") - return f"Error: {str(e)}" - - -# ============================================================================ -# HERRAMIENTAS MCP - CREACIÓN AVANZADA -# ============================================================================ - -@mcp.tool() -def create_drum_pattern( - ctx: Context, - track_index: int, - clip_index: int, - style: str = "techno", - pattern_type: str = "full", - length: float = 4.0 -) -> str: - """ - Crea un patrón de batería completo - - Args: - track_index: Índice del track MIDI donde crear el patrón - clip_index: Índice del clip/slot - style: Estilo (techno, house, trance, minimal) - pattern_type: Tipo de patrón (full, kick-only, hats-only, minimal) - length: Duración en beats - - Returns: - Confirmación del patrón creado - """ - try: - generator = get_song_generator() - if not generator: - return "Error: Generador no disponible" - - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - notes = generator.create_drum_pattern(style, pattern_type, length) - - # Crear clip - clip_response = ableton.send_command("create_clip", { - "track_index": track_index, - "clip_index": clip_index, - "length": length - }) - - if clip_response.get("status") != "success": - return f"Error creando clip: 
{clip_response.get('message')}" - - # Agregar notas - notes_response = ableton.send_command("add_notes_to_clip", { - "track_index": track_index, - "clip_index": clip_index, - "notes": notes - }) - - if notes_response.get("status") == "success": - return f"Patrón de batería '{style}' creado ({len(notes)} notas)" - else: - return f"Error agregando notas: {notes_response.get('message')}" - - except Exception as e: - logger.exception("Error en create_drum_pattern") - return f"Error: {str(e)}" - - -@mcp.tool() -def create_bassline( - ctx: Context, - track_index: int, - clip_index: int, - key: str, - style: str = "rolling", - length: float = 16.0 -) -> str: - """ - Crea una línea de bajo musical - - Args: - track_index: Índice del track MIDI - clip_index: Índice del clip - key: Tonalidad (e.g., "Am", "F#m", "C") - style: Estilo (rolling, minimal, acid, walking, offbeat) - length: Duración en beats - - Returns: - Confirmación del bassline creado - """ - try: - generator = get_song_generator() - if not generator: - return "Error: Generador no disponible" - - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - notes = generator.create_bassline(key, style, length) - - # Crear clip - clip_response = ableton.send_command("create_clip", { - "track_index": track_index, - "clip_index": clip_index, - "length": length - }) - - if clip_response.get("status") != "success": - return f"Error creando clip: {clip_response.get('message')}" - - # Agregar notas - notes_response = ableton.send_command("add_notes_to_clip", { - "track_index": track_index, - "clip_index": clip_index, - "notes": notes - }) - - if notes_response.get("status") == "success": - return f"Bassline '{style}' en {key} creado ({len(notes)} notas)" - else: - return f"Error agregando notas: {notes_response.get('message')}" - - except Exception as e: - logger.exception("Error en create_bassline") - return f"Error: {str(e)}" - - -@mcp.tool() -def 
create_chord_progression( - ctx: Context, - track_index: int, - clip_index: int, - key: str, - progression_type: str = "techno", - length: float = 16.0 -) -> str: - """ - Crea una progresión de acordes - - Args: - track_index: Índice del track MIDI - clip_index: Índice del clip - key: Tonalidad (e.g., "Am", "F#m", "C") - progression_type: Tipo (techno, house, deep, minor) - length: Duración en beats (usualmente 16 = 4 compases) - - Returns: - Confirmación de la progresión creada - """ - try: - generator = get_song_generator() - if not generator: - return "Error: Generador no disponible" - - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - notes = generator.create_chord_progression(key, progression_type, length) - - # Crear clip - clip_response = ableton.send_command("create_clip", { - "track_index": track_index, - "clip_index": clip_index, - "length": length - }) - - if clip_response.get("status") != "success": - return f"Error creando clip: {clip_response.get('message')}" - - # Agregar notas - notes_response = ableton.send_command("add_notes_to_clip", { - "track_index": track_index, - "clip_index": clip_index, - "notes": notes - }) - - if notes_response.get("status") == "success": - return f"Progresión '{progression_type}' en {key} creada ({len(notes)} notas)" - else: - return f"Error agregando notas: {notes_response.get('message')}" - - except Exception as e: - logger.exception("Error en create_chord_progression") - return f"Error: {str(e)}" - - -# ============================================================================ -# HERRAMIENTAS MCP - GESTIÓN DE TRACKS Y CLIPS -# ============================================================================ - -@mcp.tool() -def create_midi_track( - ctx: Context, - name: str = "MIDI Track", - color: int = None -) -> str: - """ - Crea un nuevo track MIDI - - Args: - name: Nombre del track - color: Color del track (0-69, opcional) - - Returns: - Confirmación con el 
índice del track creado - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - response = ableton.send_command("create_midi_track", {"index": -1}) - - if response.get("status") == "success": - track_index = response.get("result", {}).get("index", 0) - - # Setear nombre - ableton.send_command("set_track_name", { - "track_index": track_index, - "name": name - }) - - # Setear color si se especificó - if color is not None: - ableton.send_command("set_track_color", { - "track_index": track_index, - "color": color - }) - - return f"Track MIDI '{name}' creado en índice {track_index}" - else: - return f"Error: {response.get('message')}" - - except Exception as e: - logger.exception("Error en create_midi_track") - return f"Error: {str(e)}" - - -@mcp.tool() -def create_audio_track( - ctx: Context, - name: str = "Audio Track", - color: int = None -) -> str: - """ - Crea un nuevo track de audio - - Args: - name: Nombre del track - color: Color del track (0-69, opcional) - - Returns: - Confirmación con el índice del track creado - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - response = ableton.send_command("create_audio_track", {"index": -1}) - - if response.get("status") == "success": - track_index = response.get("result", {}).get("index", 0) - - # Setear nombre - ableton.send_command("set_track_name", { - "track_index": track_index, - "name": name - }) - - # Setear color si se especificó - if color is not None: - ableton.send_command("set_track_color", { - "track_index": track_index, - "color": color - }) - - return f"Track de audio '{name}' creado en índice {track_index}" - else: - return f"Error: {response.get('message')}" - - except Exception as e: - logger.exception("Error en create_audio_track") - return f"Error: {str(e)}" - - -@mcp.tool() -def set_track_volume( - ctx: Context, - track_index: int, - volume: float -) -> str: - """ 
- Ajusta el volumen de un track (0.0 - 1.0) - - Args: - track_index: Índice del track - volume: Volumen entre 0.0 y 1.0 - - Returns: - Confirmación del cambio - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - response = ableton.send_command("set_track_volume", { - "track_index": track_index, - "volume": volume - }) - - if response.get("status") == "success": - return f"Volumen del track {track_index} ajustado a {volume:.2f}" - else: - return f"Error: {response.get('message')}" - - except Exception as e: - logger.exception("Error en set_track_volume") - return f"Error: {str(e)}" - - -@mcp.tool() -def fire_clip( - ctx: Context, - track_index: int, - clip_index: int -) -> str: - """ - Dispara/reproduce un clip específico - - Args: - track_index: Índice del track - clip_index: Índice del clip/slot - - Returns: - Confirmación - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - response = ableton.send_command("fire_clip", { - "track_index": track_index, - "clip_index": clip_index - }) - - if response.get("status") == "success": - return f"Clip en track {track_index}, slot {clip_index} disparado" - else: - return f"Error: {response.get('message')}" - - except Exception as e: - logger.exception("Error en fire_clip") - return f"Error: {str(e)}" - - -@mcp.tool() -def fire_scene( - ctx: Context, - scene_index: int -) -> str: - """ - Dispara una scene (todos sus clips) - - Args: - scene_index: Índice de la scene - - Returns: - Confirmación - """ - try: - ableton = get_ableton_connection() - if not ableton.connect(): - return "Error: No conectado a Ableton" - - response = ableton.send_command("fire_scene", { - "scene_index": scene_index - }) - - if response.get("status") == "success": - return f"Scene {scene_index} disparada" - else: - return f"Error: {response.get('message')}" - - except Exception as e: - logger.exception("Error en 
fire_scene") - return f"Error: {str(e)}" - - -# ============================================================================ -# HERRAMIENTAS MCP - UTILIDADES -# ============================================================================ - -@mcp.tool() -def get_available_samples(ctx: Context) -> str: - """ - Obtiene un resumen de los samples disponibles en la librería - - Returns: - Resumen por categorías - """ - try: - sample_index = get_sample_index() - if not sample_index: - return "Error: Índice de samples no disponible" - - categories = {} - for sample in sample_index.samples: - cat = sample['category'] - categories[cat] = categories.get(cat, 0) + 1 - - lines = ["Samples disponibles:", ""] - for cat, count in sorted(categories.items(), key=lambda x: -x[1]): - lines.append(f" {cat}: {count}") - lines.append("") - lines.append(f"Total: {len(sample_index.samples)} samples") - - return "\n".join(lines) - - except Exception as e: - logger.exception("Error en get_available_samples") - return f"Error: {str(e)}" - - -@mcp.tool() -def test_connections(ctx: Context) -> str: - """ - Prueba las conexiones con Ableton y Max - - Returns: - Estado de las conexiones - """ - results = [] - - # Probar Ableton - try: - ableton = get_ableton_connection() - if ableton.connect(timeout=3.0): - results.append("Ableton: Conectado") - # Probar comando simple - resp = ableton.send_command("get_session_info") - if resp.get("status") == "success": - results.append(f" - Tempo: {resp.get('result', {}).get('tempo')} BPM") - results.append(f" - Tracks: {resp.get('result', {}).get('num_tracks')}") - else: - results.append(f"Ableton: No conectado ({ableton.last_error})") - except Exception as e: - results.append(f"Ableton: Error - {e}") - - # Probar Max - try: - max_conn = get_max_connection() - if max_conn.send_message({"type": "ping", "timestamp": datetime.now().isoformat()}): - results.append(f"Max for Live: Conexión UDP lista en puerto {DEFAULT_MAX_PORT}") - else: - results.append("Max for 
Live: Error enviando mensaje") - except Exception as e: - results.append(f"Max for Live: Error - {e}") - - # Probar Samples - try: - sample_index = get_sample_index() - if sample_index: - results.append(f"Samples: {len(sample_index.samples)} samples indexados") - else: - results.append("Samples: Índice no disponible") - except Exception as e: - results.append(f"Samples: Error - {e}") - - return "\n".join(results) - - -# ============================================================================ -# MAIN -# ============================================================================ - -def main(): - """Punto de entrada principal""" - import argparse - - parser = argparse.ArgumentParser(description="AbletonMCP-AI Server v2") - parser.add_argument("--port", type=int, default=0, help="Puerto para el servidor MCP (0 = auto)") - parser.add_argument("--transport", type=str, default="stdio", - choices=["stdio", "sse"], help="Transporte MCP") - parser.add_argument("--test", action="store_true", help="Probar conexiones y salir") - args = parser.parse_args() - - print("=" * 60) - print("AbletonMCP-AI Server v2") - print("=" * 60) - print(f"Transporte: {args.transport}") - print(f"Ableton: {ABLETON_HOST}:{DEFAULT_ABLETON_PORT}") - print(f"Max UDP: {MAX_HOST}:{DEFAULT_MAX_PORT}") - print(f"Samples: {SAMPLES_DIR}") - print("-" * 60) - - if args.test: - print("\nProbando conexiones...") - # Crear contexto temporal para test - ctx = Context(request_context={}) - result = test_connections(ctx) - print(result) - return - - # Iniciar servidor MCP - mcp.run(transport=args.transport) - - -if __name__ == "__main__": - main() diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/socket_smoke_test.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/socket_smoke_test.py deleted file mode 100644 index df16288..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/socket_smoke_test.py +++ /dev/null @@ -1,798 +0,0 @@ -import argparse -import json -import socket -from datetime import 
datetime -from typing import Any, Dict, List, Tuple - -try: - from song_generator import SongGenerator -except ImportError: - SongGenerator = None - - -STRUCTURE_SCENE_COUNTS = { - "minimal": 4, - "standard": 6, - "extended": 7, -} - -# Expected buses for Phase 7 validation -EXPECTED_BUSES = ["drums", "bass", "music", "vocal", "fx"] - -EXPECTED_CRITICAL_ROLES = {"kick", "bass", "clap", "hat"} - -EXPECTED_AUDIO_FX_LAYERS = ["AUDIO ATMOS", "AUDIO CRASH FX", "AUDIO TRANSITION FILL"] - -EXPECTED_BUS_NAMES = ["DRUMS", "BASS", "MUSIC"] - -MIN_TRACKS_FOR_EXPORT = 6 -MIN_BUSES_FOR_EXPORT = 3 -MIN_RETURNS_FOR_EXPORT = 2 -MASTER_VOLUME_RANGE = (0.75, 0.95) - -# Expected AUDIO RESAMPLE track names -AUDIO_RESAMPLE_TRACKS = [ - "AUDIO RESAMPLE REVERSE FX", - "AUDIO RESAMPLE RISER", - "AUDIO RESAMPLE DOWNLIFTER", - "AUDIO RESAMPLE STUTTER", -] - -# Bus routing map: track role -> expected bus output -BUS_ROUTING_MAP = { - "kick": {"drums"}, - "snare": {"drums"}, - "clap": {"drums"}, - "hat": {"drums"}, - "perc": {"drums"}, - "sub_bass": {"bass"}, - "bass": {"bass"}, - "chords": {"music"}, - "pad": {"music"}, - "pluck": {"music"}, - "lead": {"music"}, - "vocal": {"vocal"}, - "vocal_chop": {"vocal"}, - "reverse_fx": {"fx"}, - "riser": {"fx"}, - "impact": {"fx"}, - "atmos": {"fx"}, - "crash": {"drums", "fx"}, -} - - -def _extract_bus_payload(payload: Any) -> List[Dict[str, Any]]: - if isinstance(payload, list): - return [item for item in payload if isinstance(item, dict)] - if isinstance(payload, dict): - buses = payload.get("buses", []) - if isinstance(buses, list): - return [item for item in buses if isinstance(item, dict)] - return [] - - -def _normalize_bus_key(name: str) -> str: - normalized = "".join(ch for ch in (name or "").lower() if ch.isalnum()) - if not normalized: - return "" - if "drum" in normalized or "groove" in normalized: - return "drums" - if "bass" in normalized or "tube" in normalized or "subdeep" in normalized: - return "bass" - if "music" in normalized or 
"wide" in normalized: - return "music" - if "vocal" in normalized or "vox" in normalized or "tail" in normalized: - return "vocal" - if "fx" in normalized or "wash" in normalized: - return "fx" - return "" - - -def _canonical_track_name(name: str) -> str: - text = (name or "").strip().lower() - if not text: - return "" - if " (" in text: - text = text.split(" (", 1)[0].strip() - return text - - -class AbletonSocketClient: - def __init__(self, host: str = "127.0.0.1", port: int = 9877, timeout: float = 15.0): - self.host = host - self.port = port - self.timeout = timeout - - def send(self, command_type: str, params: Dict[str, Any] = None) -> Dict[str, Any]: - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - try: - line = reader.readline() - finally: - reader.close() - try: - sock.shutdown(socket.SHUT_RDWR) - except OSError: - pass - - if not line: - raise RuntimeError(f"No response for command: {command_type}") - - return json.loads(line) - - -def expect_success(name: str, response: Dict[str, Any]) -> Dict[str, Any]: - if response.get("status") != "success": - raise RuntimeError(f"{name} failed: {response}") - return response.get("result", {}) - - -class TestResult: - """Tracks test results for reporting.""" - def __init__(self): - self.passed: List[Tuple[str, str]] = [] - self.failed: List[Tuple[str, str]] = [] - self.skipped: List[Tuple[str, str]] = [] - self.warnings: List[Tuple[str, str]] = [] - - def add_pass(self, name: str, details: str = ""): - self.passed.append((name, details)) - - def add_fail(self, name: str, error: str): - self.failed.append((name, error)) - - def add_skip(self, name: str, reason: str): - self.skipped.append((name, reason)) - - def add_warning(self, name: str, message: str): - self.warnings.append((name, message)) - - def 
to_dict(self) -> Dict[str, Any]: - return { - "summary": { - "total": len(self.passed) + len(self.failed) + len(self.skipped) + len(self.warnings), - "passed": len(self.passed), - "failed": len(self.failed), - "skipped": len(self.skipped), - "warnings": len(self.warnings), - "status": "PASS" if len(self.failed) == 0 else "FAIL", - }, - "passed_tests": [{"name": n, "details": d} for n, d in self.passed], - "failed_tests": [{"name": n, "error": d} for n, d in self.failed], - "skipped_tests": [{"name": n, "reason": d} for n, d in self.skipped], - "warnings": [{"name": n, "message": d} for n, d in self.warnings], - } - - def print_report(self): - print("\n" + "=" * 60) - print("PHASE 7 SMOKE TEST REPORT") - print("=" * 60) - print(f"Timestamp: {datetime.now().isoformat()}") - print(f"Total: {len(self.passed) + len(self.failed) + len(self.skipped) + len(self.warnings)}") - print(f"Passed: {len(self.passed)}") - print(f"Failed: {len(self.failed)}") - print(f"Skipped: {len(self.skipped)}") - print(f"Warnings: {len(self.warnings)}") - print("-" * 60) - - if self.passed: - print("\n[PASSED]") - for name, details in self.passed: - print(f" [OK] {name}: {details}") - - if self.failed: - print("\n[FAILED]") - for name, error in self.failed: - print(f" [FAIL] {name}: {error}") - - if self.warnings: - print("\n[WARNINGS]") - for name, message in self.warnings: - print(f" [WARN] {name}: {message}") - - if self.skipped: - print("\n[SKIPPED]") - for name, reason in self.skipped: - print(f" [SKIP] {name}: {reason}") - - print("\n" + "=" * 60) - status = "PASS" if len(self.failed) == 0 else "FAIL" - print(f"FINAL STATUS: {status}") - print("=" * 60 + "\n") - - -def run_readonly_checks(client: AbletonSocketClient) -> List[Tuple[str, str]]: - checks = [] - - expect_success("get_session_info", client.send("get_session_info")) - checks.append(( - "get_session_info", -# f"tempo={session.get('tempo')} tracks={session.get('num_tracks')} scenes={session.get('num_scenes')}", - )) - - tracks = 
expect_success("get_tracks", client.send("get_tracks")) - checks.append(("get_tracks", f"tracks={len(tracks)}")) - - return checks - - -def run_generation_check( - client: AbletonSocketClient, - genre: str, - style: str, - bpm: float, - key: str, - structure: str, - use_blueprint: bool = False, -) -> List[Tuple[str, str]]: - checks = [] - params = { - "genre": genre, - "style": style, - "bpm": bpm, - "key": key, - "structure": structure, - } - - if use_blueprint and SongGenerator is not None: - params = SongGenerator().generate_config(genre, style, bpm, key, structure) - - result = expect_success( - "generate_complete_song", - client.send("generate_complete_song", params), - ) - checks.append(( - "generate_complete_song", - f"tracks={result.get('tracks')} scenes={result.get('scenes')} structure={result.get('structure')}", - )) - - session = expect_success("post_generate_session_info", client.send("get_session_info")) - actual_scenes = session.get("num_scenes") - expected_scenes = len(params.get("sections", [])) if use_blueprint and isinstance(params, dict) and params.get("sections") else STRUCTURE_SCENE_COUNTS.get(structure.lower()) - if expected_scenes is not None and actual_scenes != expected_scenes: - raise RuntimeError( - f"scene count mismatch after generate_complete_song: expected {expected_scenes}, got {actual_scenes}" - ) - - checks.append(( - "post_generate_session_info", - f"tracks={session.get('num_tracks')} scenes={actual_scenes}", - )) - - return checks - - -def run_bus_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify buses are created correctly.""" - try: - buses_payload = expect_success("list_buses", client.send("list_buses")) - buses = _extract_bus_payload(buses_payload) - bus_keys = {_normalize_bus_key(bus.get("name", "")) for bus in buses} - bus_keys.discard("") - - found_buses = [] - missing_buses = [] - for expected in EXPECTED_BUSES: - if expected in bus_keys: - found_buses.append(expected) - else: - 
missing_buses.append(expected) - - if found_buses: - results.add_pass("buses_found", f"found={found_buses}") - - if missing_buses: - # Not a failure if buses don't exist yet - they may be created during generation - results.add_skip("buses_missing", f"not_found={missing_buses} (may be created during generation)") - else: - results.add_pass("buses_complete", "all expected buses present") - - except Exception as e: - results.add_fail("buses_check", str(e)) - - -def run_routing_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify track routing is configured correctly.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - - if not tracks: - results.add_skip("routing_check", "no tracks to verify routing") - return - - correct_routing = 0 - incorrect_routing = [] - no_routing = 0 - - for track in tracks: - original_track_name = track.get("name", "") - track_name = _canonical_track_name(original_track_name) - output_routing = track.get("current_output_routing", "") - output_bus_key = _normalize_bus_key(output_routing) - track_bus_key = _normalize_bus_key(track_name) - - if output_routing and output_routing.lower() != "master": - correct_routing += 1 - elif not output_routing: - no_routing += 1 - - if track_bus_key: - continue - - for role, expected_bus in BUS_ROUTING_MAP.items(): - if role in track_name: - if output_bus_key in expected_bus: - correct_routing += 1 - elif output_routing.lower() != "master": - expected_label = "/".join(sorted(expected_bus)) - incorrect_routing.append(f"{original_track_name.lower()} -> {output_routing} (expected {expected_label})") - - results.add_pass("routing_summary", f"correct={correct_routing} no_routing={no_routing}") - - if incorrect_routing: - results.add_fail("routing_mismatches", ", ".join(incorrect_routing[:5])) - elif correct_routing > 0: - results.add_pass("routing_correct", f"{correct_routing} tracks with non-master routing") - - except Exception as e: - 
results.add_fail("routing_check", str(e)) - - -def run_audio_resample_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify AUDIO RESAMPLE tracks exist.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - track_names = [t.get("name", "") for t in tracks] - - found_layers = [] - missing_layers = [] - - for expected in AUDIO_RESAMPLE_TRACKS: - if any(expected.upper() in name.upper() for name in track_names): - found_layers.append(expected) - else: - missing_layers.append(expected) - - if found_layers: - results.add_pass("audio_resample_found", f"layers={found_layers}") - - if missing_layers: - results.add_skip("audio_resample_missing", f"not_found={missing_layers} (may require reference audio)") - else: - results.add_pass("audio_resample_complete", "all 4 resample layers present") - - # Verify they are audio tracks - for track in tracks: - name = track.get("name", "").upper() - if "AUDIO RESAMPLE" in name: - if track.get("has_audio_input"): - results.add_pass(f"audio_track_type_{name[:20]}", "correct audio track type") - else: - results.add_fail(f"audio_track_type_{name[:20]}", "expected audio track") - - except Exception as e: - results.add_fail("audio_resample_check", str(e)) - - -def run_automation_snapshot_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify automation and device parameter snapshots.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - - total_devices = 0 - tracks_with_devices = 0 - tracks_with_automation = 0 - - for track in tracks: - num_devices = track.get("num_devices", 0) - if num_devices > 0: - total_devices += num_devices - tracks_with_devices += 1 - - # Check for arrangement clips (may contain automation) - arrangement_clips = track.get("arrangement_clip_count", 0) - if arrangement_clips > 0: - tracks_with_automation += 1 - - if tracks_with_devices > 0: - results.add_pass("automation_devices", f"tracks_with_devices={tracks_with_devices} 
total_devices={total_devices}") - else: - results.add_skip("automation_devices", "no devices found") - - if tracks_with_automation > 0: - results.add_pass("automation_clips", f"tracks_with_arrangement_clips={tracks_with_automation}") - else: - results.add_skip("automation_clips", "no arrangement clips (may need to commit to arrangement)") - - # Try to get device parameters for first track with devices - for i, track in enumerate(tracks): - if track.get("num_devices", 0) > 0: - try: - devices = expect_success("get_devices", client.send("get_devices", {"track_index": i})) - if devices: - params_sample = [] - for dev in devices[:3]: - params = dev.get("parameters", []) - if params: - params_sample.append(f"{dev.get('name', '?')}:{len(params)}params") - if params_sample: - results.add_pass("automation_params_snapshot", ", ".join(params_sample[:3])) - break - except Exception: - pass - break - - except Exception as e: - results.add_fail("automation_snapshot_check", str(e)) - - -def run_loudness_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify basic loudness levels using output meters.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - - tracks_with_signal = 0 - max_level = 0.0 - level_samples = [] - - for track in tracks: - output_level = track.get("output_meter_level", 0.0) - left = track.get("output_meter_left", 0.0) - right = track.get("output_meter_right", 0.0) - - if output_level and output_level > 0: - tracks_with_signal += 1 - max_level = max(max_level, output_level) - level_samples.append(f"{track.get('name', '?')[:15]}:{output_level:.2f}") - - # Check for stereo balance - if left and right and left > 0 and right > 0: - balance = abs(left - right) - if balance < 0.1: - pass # Balanced stereo - - if tracks_with_signal > 0: - results.add_pass("loudness_signal_detected", f"tracks_with_signal={tracks_with_signal} max_level={max_level:.3f}") - else: - results.add_skip("loudness_signal", "no signal detected 
(playback may be stopped)") - - # Check for clipping (levels > 1.0) - if max_level > 1.0: - results.add_fail("loudness_clipping", f"max_level={max_level:.3f} indicates potential clipping") - else: - results.add_pass("loudness_no_clipping", f"max_level={max_level:.3f}") - - # Sample levels for verification - if level_samples: - results.add_pass("loudness_levels", ", ".join(level_samples[:5])) - - except Exception as e: - results.add_fail("loudness_check", str(e)) - - -def run_critical_layer_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify critical layers (kick, bass, clap, hat) exist and have content.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - track_names = [str(t.get("name", "")).upper() for t in tracks if isinstance(t, dict)] - - found_layers = {role: False for role in EXPECTED_CRITICAL_ROLES} - for track_name in track_names: - for role in EXPECTED_CRITICAL_ROLES: - if role.upper() in track_name or f"AUDIO {role.upper()}" in track_name: - found_layers[role] = True - break - - for role, found in found_layers.items(): - if found: - results.add_pass(f"critical_layer_{role}", "found in tracks") - else: - results.add_fail(f"critical_layer_{role}", "missing - set may sound incomplete") - except Exception as e: - results.add_fail("critical_layer_check", str(e)) - - -def run_derived_fx_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify derived FX tracks (AUDIO RESAMPLE) are present.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - track_names = [str(t.get("name", "")).upper() for t in tracks if isinstance(t, dict)] - - found_derived = [] - missing_derived = [] - for expected in AUDIO_RESAMPLE_TRACKS: - if any(expected.upper() in name for name in track_names): - found_derived.append(expected) - else: - missing_derived.append(expected) - - if found_derived: - results.add_pass("derived_fx_found", f"layers={found_derived}") - - if missing_derived: - 
results.add_skip("derived_fx_missing", f"not_found={missing_derived} (may require reference audio)") - else: - results.add_pass("derived_fx_complete", "all 4 resample layers present") - - except Exception as e: - results.add_fail("derived_fx_check", str(e)) - - -def run_export_readiness_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify set is ready for export.""" - try: - expect_success("get_session_info", client.send("get_session_info")) - tracks = expect_success("get_tracks", client.send("get_tracks")) - - issues = [] - - track_count = len(tracks) if isinstance(tracks, list) else 0 - if track_count < MIN_TRACKS_FOR_EXPORT: - issues.append(f"insufficient_tracks: {track_count} (need {MIN_TRACKS_FOR_EXPORT}+)") - - master_response = client.send("get_track_info", {"track_type": "master", "track_index": 0}) - if master_response.get("status") == "success": - master_volume = float(master_response.get("result", {}).get("volume", 0.85)) - if master_volume < MASTER_VOLUME_RANGE[0]: - issues.append(f"master_volume_low: {master_volume:.2f}") - elif master_volume > MASTER_VOLUME_RANGE[1]: - issues.append(f"master_volume_high: {master_volume:.2f}") - - muted_count = sum(1 for t in tracks if isinstance(t, dict) and t.get("mute", False)) - if muted_count > track_count * 0.5: - issues.append(f"too_many_muted: {muted_count}/{track_count}") - - if issues: - results.add_pass("export_readiness_issues", f"issues={len(issues)}") - for issue in issues: - results.add_fail(f"export_ready_{issue.split(':')[0]}", issue) - else: - results.add_pass("export_ready", "set appears ready for export") - - except Exception as e: - results.add_fail("export_readiness_check", str(e)) - - -def run_midi_clip_content_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify MIDI tracks have clips with notes.""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - - midi_tracks_empty = [] - midi_tracks_with_notes = 0 - - for track in 
tracks: - if not isinstance(track, dict): - continue - track_type = str(track.get("type", "")).lower() - if track_type != "midi": - continue - - track_name = track.get("name", "?") - clips = track.get("clips", []) - if not isinstance(clips, list): - clips = [] - - has_notes = False - empty_clips = [] - for clip in clips: - if not isinstance(clip, dict): - continue - notes_count = clip.get("notes_count", 0) - has_notes_flag = clip.get("has_notes", None) - if has_notes_flag is True or notes_count > 0: - has_notes = True - elif has_notes_flag is False or (has_notes_flag is None and notes_count == 0): - empty_clips.append(clip.get("name", "?")) - if has_notes: - midi_tracks_with_notes += 1 - elif empty_clips: - midi_tracks_empty.append({ - "track_name": track_name, - "empty_clips_count": len(empty_clips), - }) - - if midi_tracks_with_notes > 0: - results.add_pass("midi_tracks_with_notes", f"count={midi_tracks_with_notes}") - - if midi_tracks_empty: - for track_info in midi_tracks_empty[:3]: - results.add_fail( - f"midi_track_empty_{track_info['track_name'][:20]}", - f"Track has {track_info['empty_clips_count']} empty MIDI clips - may need notes" - ) - - except Exception as e: - results.add_fail("midi_clip_content_check", str(e)) - - -def run_bus_signal_checks(client: AbletonSocketClient, results: TestResult) -> None: - """Verify buses receive signal from tracks.""" - try: - buses_payload = expect_success("list_buses", client.send("list_buses")) - buses = _extract_bus_payload(buses_payload) - tracks = expect_success("get_tracks", client.send("get_tracks")) - - bus_signal_map = {} - for bus in buses: - if not isinstance(bus, dict): - continue - bus_name = bus.get("name", "").upper() - bus_signal_map[bus_name] = {"senders": [], "has_signal": False} - - for track in tracks: - if not isinstance(track, dict): - continue - track_name = str(track.get("name", "")).upper() - output_routing = str(track.get("current_output_routing", "")).upper() - - for bus_name in bus_signal_map: 
- if bus_name in output_routing: - bus_signal_map[bus_name]["senders"].append(track_name) - - sends = track.get("sends", []) - if isinstance(sends, list): - for send_level in sends: - try: - if float(send_level) > 0.01: - pass - except (TypeError, ValueError): - pass - - buses_without_senders = [] - buses_with_senders = [] - - for bus_name, info in bus_signal_map.items(): - if info["senders"]: - buses_with_senders.append(bus_name) - else: - buses_without_senders.append(bus_name) - - if buses_with_senders: - results.add_pass("buses_with_signal", f"buses={buses_with_senders}") - - if buses_without_senders: - for bus_name in buses_without_senders[:3]: - results.add_fail(f"bus_no_signal_{bus_name[:15]}", - f"Bus '{bus_name}' has no routed tracks - will not produce output") - - except Exception as e: - results.add_fail("bus_signal_check", str(e)) - - -def run_clipping_detection(client: AbletonSocketClient, results: TestResult) -> None: - """Detect tracks with dangerously high volume (clipping risk).""" - try: - tracks = expect_success("get_tracks", client.send("get_tracks")) - - clipping_tracks = [] - high_volume_tracks = [] - - for track in tracks: - if not isinstance(track, dict): - continue - track_name = track.get("name", "?") - volume = float(track.get("volume", 0.85)) - - if volume > 0.95: - clipping_tracks.append({"name": track_name, "volume": volume}) - elif volume > 0.90: - high_volume_tracks.append({"name": track_name, "volume": volume}) - - if clipping_tracks: - for track_info in clipping_tracks[:3]: - results.add_fail(f"clipping_track_{track_info['name'][:15]}",f"Volume {track_info['volume']:.2f} > 0.95 - CLIPPING RISK") - - if high_volume_tracks: - for track_info in high_volume_tracks[:3]: - results.add_warning(f"high_volume_{track_info['name'][:15]}", - f"Volume {track_info['volume']:.2f} - consider reducing") - - if not clipping_tracks and not high_volume_tracks: - results.add_pass("no_clipping_tracks", "All track volumes in safe range") - - except 
Exception as e: - results.add_fail("clipping_detection", str(e)) - - -def run_all_phase7_tests(client: AbletonSocketClient, results: TestResult) -> None: - """Run all Phase 7 smoke tests.""" - print("\n[Phase 7] Running bus verification...") - run_bus_checks(client, results) - - print("[Phase 7] Running routing verification...") - run_routing_checks(client, results) - - print("[Phase 7] Running AUDIO RESAMPLE track verification...") - run_audio_resample_checks(client, results) - - print("[Phase 7] Running automation snapshot verification...") - run_automation_snapshot_checks(client, results) - - print("[Phase 7] Running loudness verification...") - run_loudness_checks(client, results) - - print("[Phase 7] Running critical layer verification...") - run_critical_layer_checks(client, results) - - print("[Phase 7] Running derived FX verification...") - run_derived_fx_checks(client, results) - - print("[Phase 7] Running export readiness verification...") - run_export_readiness_checks(client, results) - - print("[Phase 7] Running MIDI clip content verification...") - run_midi_clip_content_checks(client, results) - - print("[Phase 7] Running bus signal verification...") - run_bus_signal_checks(client, results) - - print("[Phase 7] Running clipping detection...") - run_clipping_detection(client, results) - - -def main() -> int: - parser = argparse.ArgumentParser(description="Smoke test for AbletonMCP_AI socket runtime") - parser.add_argument("--host", default="127.0.0.1") - parser.add_argument("--port", type=int, default=9877) - parser.add_argument("--timeout", type=float, default=15.0) - parser.add_argument("--generate-demo", action="store_true") - parser.add_argument("--genre", default="techno") - parser.add_argument("--style", default="industrial") - parser.add_argument("--bpm", type=float, default=128.0) - parser.add_argument("--key", default="Am") - parser.add_argument("--structure", default="standard") - parser.add_argument("--use-blueprint", action="store_true") - 
parser.add_argument("--phase7", action="store_true", help="Run Phase 7 extended tests (buses, routing, audio resample, automation, loudness)") - parser.add_argument("--json-report", action="store_true", help="Output report as JSON") - args = parser.parse_args() - - client = AbletonSocketClient(host=args.host, port=args.port, timeout=args.timeout) - - # Run basic checks - print("[Basic] Running readonly checks...") - checks = run_readonly_checks(client) - - for name, details in checks: - print(f"[ok] {name}: {details}") - - # Run generation check if requested - if args.generate_demo: - print("\n[Generation] Running generation check...") - checks.extend( - run_generation_check( - client, - genre=args.genre, - style=args.style, - bpm=args.bpm, - key=args.key, - structure=args.structure, - use_blueprint=args.use_blueprint, - ) - ) - for name, details in checks[-2:]: - print(f"[ok] {name}: {details}") - - # Run Phase 7 tests if requested - results = TestResult() - if args.phase7: - run_all_phase7_tests(client, results) - - if args.json_report: - print(json.dumps(results.to_dict(), indent=2)) - else: - results.print_report() - - return 0 if len(results.failed) == 0 else 1 - - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/song_generator.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/song_generator.py deleted file mode 100644 index 141f1fc..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/song_generator.py +++ /dev/null @@ -1,6268 +0,0 @@ -""" -song_generator.py - Generador musical para AbletonMCP-AI. 
-""" - -import random -import logging -from typing import List, Dict, Any, Optional, Union, Tuple -from dataclasses import dataclass -from pathlib import Path -from collections import defaultdict - -logger = logging.getLogger("SongGenerator") - -# Notas MIDI para referencia -NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'] - -# Escalas comunes (semitonos desde la raíz) -SCALES = { - 'major': [0, 2, 4, 5, 7, 9, 11], - 'minor': [0, 2, 3, 5, 7, 8, 10], - 'harmonic_minor': [0, 2, 3, 5, 7, 8, 11], - 'dorian': [0, 2, 3, 5, 7, 9, 10], - 'phrygian': [0, 1, 3, 5, 7, 8, 10], - 'mixolydian': [0, 2, 4, 5, 7, 9, 10], - 'pentatonic_minor': [0, 3, 5, 7, 10], - 'pentatonic_major': [0, 2, 4, 7, 9], - 'blues': [0, 3, 5, 6, 7, 10], -} - -# Progresiones de acordes comunes -CHORD_PROGRESSIONS = { - 'techno': [ - [1, 1, 1, 1], # i - i - i - i (minimal) - [1, 6, 1, 6], # i - VI - i - VI - [1, 4, 1, 4], # i - iv - i - iv - [1, 7, 6, 7], # i - VII - VI - VII - ], - 'house': [ - [1, 5, 6, 4], # I - V - vi - IV (pop house) - [1, 4, 5, 1], # I - IV - V - I - [6, 4, 1, 5], # vi - IV - I - V - [1, 6, 4, 5], # I - vi - IV - V - ], - 'deep': [ - [1, 6, 2, 5], # i - VI - ii - V - [2, 5, 1, 6], # ii - V - i - VI - ], - 'tech-house': [ - [1, 6, 3, 6], # i - VI - III - VI (dark, hypnotic: Am -> Fm -> Cm -> Fm) - [1, 5, 1, 5], # i - v - i - v (two-chord drop loop: Dm -> Am -> Dm -> Am) - [1, 5, 6, 5], # i - v - VI - v (minimalist tension: Cm -> Gm -> Ab -> Gm) - [1, 4, 1, 4], # i - iv - i - iv (groovy: Am -> Dm -> Am -> Dm) - ], - 'trance': [ - [1, 5, 6, 4], # I - V - vi - IV - [6, 4, 1, 5], # vi - IV - I - V - [1, 4, 6, 5], # I - IV - vi - V - ], -} - -# Configuraciones por género -GENRE_CONFIGS = { - 'techno': { - 'bpm_range': (125, 140), - 'default_bpm': 132, - 'keys': ['Am', 'Fm', 'Dm', 'G#m', 'Cm'], - 'styles': ['industrial', 'peak-time', 'dub', 'minimal', 'acid'], - }, - 'house': { - 'bpm_range': (120, 128), - 'default_bpm': 124, - 'keys': ['Am', 'Em', 'Cm', 'Gm', 
'Dm', 'F#m'], - 'styles': ['deep', 'tech-house', 'progressive', 'afro', 'classic', 'funky'], - }, - 'tech-house': { - 'bpm_range': (122, 128), - 'default_bpm': 126, - 'keys': ['Am', 'Fm', 'Dm', 'Gm', 'Cm'], - 'styles': ['groovy', 'bouncy', 'minimal', 'latin', 'latin-industrial', 'jackin', 'swing', 'latin-tech-house'], - }, - 'trance': { - 'bpm_range': (135, 150), - 'default_bpm': 140, - 'keys': ['Fm', 'Am', 'Dm', 'Gm', 'Cm'], - 'styles': ['progressive', 'uplifting', 'psy', 'acid'], - }, - 'drum-and-bass': { - 'bpm_range': (160, 180), - 'default_bpm': 174, - 'keys': ['Am', 'Fm', 'Gm', 'Cm'], - 'styles': ['liquid', 'neuro', 'jump-up', 'jungle'], - }, -} - -# Colores por tipo de track -TRACK_COLORS = { - 'kick': 10, # Rojo - 'snare': 20, # Verde - 'hat': 5, # Amarillo - 'clap': 45, # Naranja - 'bass': 30, # Azul - 'synth': 50, # Rosa/Magenta - 'chords': 60, # Púrpura - 'fx': 25, # Verde claro - 'vocal': 15, # Naranja oscuro - 'pad': 55, # Purpura claro - 'perc': 20, # Verde - 'ride': 14, # Amarillo oscuro - 'technical': 58, # Gris -} - -BUS_TRACK_COLORS = { - 'drums': 10, - 'bass': 30, - 'music': 50, - 'vocal': 15, - 'fx': 25, - 'sc_trigger': 58, # Gris - track fantasma para sidechain -} - -# NTH-04: Genre-specific color palettes for visual consistency -GENRE_COLOR_PALETTES = { - 'tech-house': { - 'kick': 13, 'clap': 11, 'snare': 11, 'hat': 12, - 'bass': 35, 'sub_bass': 33, - 'synth': 53, 'chords': 51, 'pad': 55, 'pluck': 50, - 'fx': 21, 'vocal': 17, 'perc': 20, 'ride': 14, - 'technical': 58, - }, - 'techno': { - 'kick': 0, 'clap': 5, 'hat': 3, - 'bass': 30, 'synth': 45, - 'fx': 25, 'perc': 20, 'technical': 58, - }, - 'house': { - 'kick': 10, 'clap': 15, 'hat': 20, - 'bass': 34, 'synth': 50, 'chords': 55, - 'fx': 25, 'vocal': 40, 'perc': 20, 'technical': 58, - }, -} - -# Configuracion de sidechain por bus -# Cada bus puede tener sidechain desde SC TRIGGER -BUS_SIDECHAIN_CONFIG = { - 'drums': { - 'enabled': False, # Drums no suele necesitar sidechain - 'threshold': 
-18.0, - 'attack': 0.003, - 'release': 0.08, - 'ratio': 4.0, - }, - 'bass': { - 'enabled': True, # Sidechain clave para bass - 'threshold': -22.0, - 'attack': 0.002, - 'release': 0.12, - 'ratio': 4.5, - }, - 'music': { - 'enabled': True, # Sidechain sutil para musica - 'threshold': -26.0, - 'attack': 0.005, - 'release': 0.18, - 'ratio': 3.0, - }, - 'vocal': { - 'enabled': True, # Sidechain suave para vocal - 'threshold': -28.0, - 'attack': 0.008, - 'release': 0.22, - 'ratio': 2.5, - }, - 'fx': { - 'enabled': False, # FX generalmente sin sidechain - 'threshold': -30.0, - 'attack': 0.01, - 'release': 0.3, - 'ratio': 2.0, - }, -} - -# ============================================================================= -# FASE 3: LOUDNESS CONSISTENCY Y GAIN STAGING -# ============================================================================= -# -# CALIBRATION PHILOSOPHY: -# ====================== -# - Kick sits at unity (0.85) as the rhythmic anchor -# - Bass sits slightly below kick (-1dB) for low-end presence without mud -# - Supporting elements progressively lower to create mix depth -# - Buses attenuated to preserve master headroom -# - Master chain with soft limiting for consistent output -# -# HEADROOM TARGETS: -# ================= -# - Track peaks: -6dB to -3dB before bus -# - Bus peaks: -3dB to -1dB before master -# - Master out: -1dB peak (limited), integrated LUFS ~-10 to -8 - -# Headroom target en dB (negativo para dejar espacio antes del limiter) -TARGET_HEADROOM_DB = -1.5 # 1.5dB de headroom antes del limiter - -# Safe limiting threshold - prevents digital clipping -MASTER_LIMITER_CEILING_DB = -0.3 # Never go above -0.3dBFS on master - -# Calibracion de ganancia por bus (valores lineales 0.0-1.0) -# Calibrado empiricamente para headroom consistente y balance de mezcla -# K: Drums como elemento principal, B: Bass como soporte, M: Music como capa -BUS_GAIN_CALIBRATION = { - 'drums': { - 'volume': 0.92, # Drums bus: principal, mas alto - 'limiter_gain': 0.0, # 
Sin gain adicional en limiter de bus - 'compressor_threshold': -16.0, # Compression suave para punch - 'saturator_drive': 0.6, # armonia sutil, no crunchy - 'utility_gain': 0.0, # Sin gain adicional - }, - 'bass': { - 'volume': 0.88, # Bass bus: soporte fuerte - 'limiter_gain': 0.0, # Sin limiter en bass bus (soft clip natural) - 'compressor_threshold': -18.0, # Threshold suave para low-end - 'saturator_drive': 0.4, # Saturacion sutil - evitar crunch - 'utility_gain': 0.0, # Sin gain adicional - }, - 'music': { - 'volume': 0.85, # Music bus: capa principal - 'limiter_gain': 0.0, # Sin limiter en music bus - 'compressor_threshold': -20.0, # Preservar transients - 'saturator_drive': 0.0, # Sin saturacion en bus de musica - 'utility_gain': 0.0, - }, - 'vocal': { - 'volume': 0.82, # Vocal bus: presente en mezcla - 'limiter_gain': 0.0, # Sin limiter - 'compressor_threshold': -16.0, # Compresion sutil para presencia - 'saturator_drive': 0.0, - 'utility_gain': 0.0, - }, - 'fx': { - 'volume': 0.78, # FX bus: efectos audibles - 'limiter_gain': 0.0, # Sin gain - 'compressor_threshold': -22.0, # Preservar dynamics - 'saturator_drive': 0.0, - 'utility_gain': 0.0, # Sin reduccion - }, - 'sc_trigger': { - 'volume': 0.0, # Track fantasma - sin audio - 'limiter_gain': 0.0, - 'compressor_threshold': 0.0, - 'saturator_drive': 0.0, - 'utility_gain': 0.0, - }, -} - -# Master chain calibracion -# Calibrado para LUFS ~-8 a -10dB con headroom de 1-2dB antes del limiter -# El limiter ceiling esta en -0.3dB para evitar digital clipping -MASTER_CALIBRATION = { - 'default': { - 'volume': 0.85, # Master at ~0dB de ganancia interna - 'utility_gain': 0.0, # Sin reduccion - volumen completo - 'stereo_width': 1.04, # Ligerisimo widening - 'saturator_drive': 0.12, # Saturacion muy sutil en master - 'compressor_ratio': 0.50, # Compresion suave (glue, no squash) - 'compressor_attack': 0.30, # Attack lento para preservar transients - 'compressor_release': 0.20, - 'limiter_gain': 3.5, # +3.5dB make-up 
gain para nivel moderno - 'limiter_ceiling': -0.3, # Ceiling a -0.3dBFS (safe limiting) - }, - 'warehouse': { - 'volume': 0.85, - 'utility_gain': 0.0, # Sin reduccion - 'saturator_drive': 0.25, # Mas drive para industrial techno - 'compressor_ratio': 0.55, # Un poco mas de compresion - 'limiter_gain': 3.8, # Mas gain para industrial - 'limiter_ceiling': -0.3, - }, - 'festival': { - 'volume': 0.86, - 'utility_gain': 0.0, # Sin reduccion - 'stereo_width': 1.06, # Mas ancho para festival - 'limiter_gain': 4.0, # Maximo gain para festival - 'limiter_ceiling': -0.3, - }, - 'swing': { - 'volume': 0.85, - 'utility_gain': 0.0, - 'saturator_drive': 0.15, # Moderado - 'limiter_gain': 3.2, - 'limiter_ceiling': -0.3, - }, - 'jackin': { - 'volume': 0.85, - 'utility_gain': 0.0, - 'compressor_ratio': 0.52, - 'limiter_gain': 3.0, - 'limiter_ceiling': -0.3, - }, - 'tech-house-club': { - 'volume': 0.85, - 'utility_gain': 0.0, # Sin reduccion - 'stereo_width': 1.04, - 'saturator_drive': 0.4, # Mas drive para punch - 'compressor_ratio': 0.60, # Mas compresion para club - 'compressor_attack': 0.28, - 'limiter_gain': 3.5, - 'limiter_ceiling': -0.3, - }, - 'tech-house-deep': { - 'volume': 0.85, - 'utility_gain': 0.0, # Sin reduccion - 'stereo_width': 1.02, # Narrower para deep - 'saturator_drive': 0.1, # Muy sutil - 'compressor_ratio': 0.50, - 'compressor_attack': 0.38, # Mas lento para deep - 'limiter_gain': 3.0, - 'limiter_ceiling': -0.3, - }, - 'tech-house-funky': { - 'volume': 0.85, - 'utility_gain': 0.0, - 'stereo_width': 1.08, # Wide para groove - 'saturator_drive': 0.3, - 'compressor_ratio': 0.55, - 'compressor_attack': 0.30, - 'limiter_gain': 3.5, - 'limiter_ceiling': -0.3, - }, -} - -# Calibracion de gain por rol para consistencia de mezcla -# Valores calibrados empiricamente basados en: -# - Kick como ancla a 0.85 -# - Bass -1dB relativo a kick -# - Elementos de soporte progresivamente mas bajos -# - Headroom preservado en cada capa -ROLE_GAIN_CALIBRATION = { - # DRUMS - Kick 
es el ancla, otros elementos debajo - 'kick': { - 'volume': 0.85, # Ancla: 0dB relativo, elemento principal - 'saturator_drive': 1.5, # Saturacion sutil para punch - 'peak_reduction': 0.0, # Sin reduccion - es el ancla - }, - 'clap': { - 'volume': 0.78, # -1.5dB relativo a kick - 'saturator_drive': 0.0, # Sin saturacion - 'peak_reduction': 0.0, - }, - 'snare_fill': { - 'volume': 0.72, # -3dB, transitorio fuerte - 'peak_reduction': 0.0, - }, - 'hat_closed': { - 'volume': 0.68, # -4dB, elemento secundario - 'peak_reduction': 0.0, - }, - 'hat_open': { - 'volume': 0.65, # -4.5dB, mas abajo por sustain - 'peak_reduction': 0.0, - }, - 'top_loop': { - 'volume': 0.62, # -5dB, capa ritmica secundaria - 'peak_reduction': 0.0, - }, - 'perc': { - 'volume': 0.70, # -3.5dB, soporte ritmico - 'peak_reduction': 0.0, - }, - 'ride': { - 'volume': 0.58, # -5.5dB, sustain largo - 'peak_reduction': 0.0, - }, - 'crash': { - 'volume': 0.50, # -7dB, transitorio largo - 'peak_reduction': 0.0, - }, - 'tom_fill': { - 'volume': 0.68, # -4dB, transitorio - 'peak_reduction': 0.0, - }, - # BASS - Underground but underneath drums - 'sub_bass': { - 'volume': 0.80, # -0.5dB relativo a kick - 'saturator_drive': 0.0, # Sin saturacion en sub - 'peak_reduction': 0.0, - }, - 'bass': { - 'volume': 0.78, # -1dB relativo a kick - 'saturator_drive': 2.0, # Moderado para harmonic content - 'peak_reduction': 0.0, - }, - # MUSIC - Capas de soporte, debajo del low-end - 'drone': { - 'volume': 0.55, # -7dB, elemento de fondo - 'peak_reduction': 0.0, - }, - 'chords': { - 'volume': 0.70, # -3dB, armonia principal - 'peak_reduction': 0.0, - }, - 'stab': { - 'volume': 0.65, # -4dB, transitorio - 'saturator_drive': 1.8, # Moderado - 'peak_reduction': 0.0, - }, - 'pad': { - 'volume': 0.60, # -5dB, fondo armonico - 'peak_reduction': 0.0, - }, - 'pluck': { - 'volume': 0.68, # -3.5dB, melodia sutil - 'peak_reduction': 0.0, - }, - 'arp': { - 'volume': 0.65, # -4dB, movimiento armonico - 'peak_reduction': 0.0, - }, - 
'lead': { - 'volume': 0.72, # -2.5dB, elemento principal musical - 'saturator_drive': 1.2, # Moderado - 'peak_reduction': 0.0, - }, - 'counter': { - 'volume': 0.62, # -5dB, contramelodia - 'peak_reduction': 0.0, - }, - # FX - Efectos en el fondo de la mezcla - 'reverse_fx': { - 'volume': 0.52, # -7dB, efecto ambiente - 'peak_reduction': 0.0, - }, - 'riser': { - 'volume': 0.60, # -5dB, sube hacia el climax - 'peak_reduction': 0.0, - }, - 'impact': { - 'volume': 0.55, # -6dB, efecto puntual - 'peak_reduction': 0.0, - }, - 'atmos': { - 'volume': 0.50, # -8dB, fondo atmosferico - 'peak_reduction': 0.0, - }, - # VOCAL - 'vocal': { - 'volume': 0.70, # -3dB, debajo de drums pero presente - 'peak_reduction': 0.0, - }, - # SC TRIGGER - Track fantasma para sidechain - 'sc_trigger': { - 'volume': 0.0, # Sin salida de audio - 'saturator_drive': 0.0, - 'peak_reduction': 0.0, - }, -} - -# Factores de ajuste por estilo -# NOTA: NO usar multiplicadores de volumen que rompan el gain staging -# Solo ajustes sutiles de procesamiento y sends -STYLE_GAIN_ADJUSTMENTS = { - 'industrial': { - 'saturator_drive_factor': 1.3, # Aumentar drive en elementos agresivos - 'additional_heat_send': 0.05, # Un poco mas de heat - 'limiter_gain_factor': 1.15, # +15% gain para industrial techno - }, - 'latin': { - 'additional_pan_width': 0.05, - }, - 'peak-time': { - 'master_compressor_ratio_factor': 1.1, - 'limiter_gain_factor': 1.1, # +10% gain para peak-time - }, - 'minimal': { - 'fx_bus_send_reduction': 0.05, - 'additional_space_send': 0.03, # Un poco mas de reverb para espacio - }, -} - -ROLE_BUS_ASSIGNMENTS = { - 'sc_trigger': 'sc_trigger', # Rutea a su propio bus fantasma - 'kick': 'drums', - 'clap': 'drums', - 'snare_fill': 'drums', - 'hat_closed': 'drums', - 'hat_open': 'drums', - 'top_loop': 'drums', - 'perc': 'drums', - 'tom_fill': 'drums', - 'ride': 'drums', - 'crash': 'drums', - 'sub_bass': 'bass', - 'bass': 'bass', - 'drone': 'music', - 'chords': 'music', - 'stab': 'music', - 'pad': 
'music', - 'pluck': 'music', - 'arp': 'music', - 'lead': 'music', - 'counter': 'music', - 'reverse_fx': 'fx', - 'riser': 'fx', - 'impact': 'fx', - 'atmos': 'fx', - 'vocal': 'vocal', -} - -SECTION_BLUEPRINTS = { - 'minimal': [ - ('INTRO', 8, 12, 'intro', 1), - ('GROOVE', 16, 20, 'build', 2), - ('BREAK', 8, 25, 'break', 1), - ('OUTRO', 8, 8, 'outro', 1), - ], - 'standard': [ - ('INTRO', 8, 12, 'intro', 1), - ('BUILD', 8, 18, 'build', 2), - ('DROP A', 16, 28, 'drop', 4), - ('BREAK', 8, 25, 'break', 1), - ('DROP B', 16, 30, 'drop', 5), - ('OUTRO', 8, 8, 'outro', 1), - ], - 'extended': [ - ('INTRO DJ', 16, 10, 'intro', 1), - ('BUILD A', 8, 18, 'build', 2), - ('DROP A', 16, 28, 'drop', 4), - ('BREAKDOWN', 8, 25, 'break', 1), - ('BUILD B', 8, 18, 'build', 3), - ('DROP B', 16, 30, 'drop', 5), - ('OUTRO DJ', 16, 8, 'outro', 1), - ], - 'club': [ - ('INTRO DJ', 16, 10, 'intro', 1), - ('GROOVE A', 16, 14, 'build', 2), - ('VOCAL BUILD', 8, 18, 'build', 3), - ('DROP A', 16, 28, 'drop', 4), - ('BREAKDOWN', 8, 25, 'break', 1), - ('BUILD B', 8, 18, 'build', 3), - ('DROP B', 16, 30, 'drop', 5), - ('PEAK', 8, 32, 'drop', 5), - ('OUTRO DJ', 16, 8, 'outro', 1), - ], - 'tech-house-dj': [ - ('INTRO DJ', 32, 8, 'intro', 1), - ('GROOVE A', 16, 16, 'build', 2), - ('VOX TEASE', 8, 20, 'build', 3), - ('DROP A', 32, 30, 'drop', 5), - ('BREAK', 8, 22, 'break', 1), - ('BUILD', 8, 24, 'build', 3), - ('DROP B', 32, 32, 'drop', 5), - ('OUTRO DJ', 32, 8, 'outro', 1), - ], -} - -SECTION_BLUEPRINT_VARIANTS = { - 'standard': [ - SECTION_BLUEPRINTS['standard'], - [ - ('INTRO', 8, 12, 'intro', 1), - ('GROOVE A', 8, 16, 'build', 2), - ('DROP A', 16, 28, 'drop', 4), - ('BREAKDOWN', 8, 24, 'break', 1), - ('BUILD B', 8, 20, 'build', 3), - ('DROP B', 16, 31, 'drop', 5), - ], - [ - ('INTRO DJ', 16, 10, 'intro', 1), - ('BUILD', 8, 18, 'build', 2), - ('DROP A', 16, 28, 'drop', 4), - ('MID BREAK', 8, 22, 'break', 1), - ('PEAK', 16, 31, 'drop', 5), - ], - ], - 'club': [ - SECTION_BLUEPRINTS['club'], - [ - ('INTRO 
DJ', 16, 10, 'intro', 1), - ('TEASE', 8, 14, 'build', 2), - ('GROOVE A', 16, 18, 'build', 3), - ('DROP A', 16, 28, 'drop', 4), - ('BREAKDOWN', 8, 24, 'break', 1), - ('BUILD B', 8, 20, 'build', 3), - ('PEAK', 16, 32, 'drop', 5), - ('OUTRO DJ', 24, 8, 'outro', 1), - ], - [ - ('INTRO DJ', 16, 10, 'intro', 1), - ('GROOVE A', 16, 15, 'build', 2), - ('VOCAL BUILD', 8, 20, 'build', 3), - ('DROP A', 16, 27, 'drop', 4), - ('MID BREAK', 8, 22, 'break', 1), - ('GROOVE B', 8, 18, 'build', 3), - ('DROP B', 24, 31, 'drop', 5), - ('OUTRO DJ', 16, 8, 'outro', 1), - ], - ], -} - -ROLE_ACTIVITY = { - 'sc_trigger': {'intro': 4, 'build': 4, 'drop': 4, 'break': 2, 'outro': 3}, - 'kick': {'intro': 2, 'build': 3, 'drop': 4, 'break': 1, 'outro': 2}, - 'clap': {'intro': 0, 'build': 2, 'drop': 4, 'break': 1, 'outro': 1}, - 'snare_fill': {'intro': 0, 'build': 2, 'drop': 1, 'break': 1, 'outro': 0}, - 'hat_closed': {'intro': 1, 'build': 2, 'drop': 4, 'break': 1, 'outro': 1}, - 'hat_open': {'intro': 0, 'build': 1, 'drop': 3, 'break': 0, 'outro': 1}, - 'top_loop': {'intro': 0, 'build': 2, 'drop': 4, 'break': 1, 'outro': 1}, - 'perc': {'intro': 0, 'build': 2, 'drop': 3, 'break': 1, 'outro': 0}, - 'tom_fill': {'intro': 0, 'build': 1, 'drop': 1, 'break': 0, 'outro': 0}, - 'ride': {'intro': 0, 'build': 1, 'drop': 2, 'break': 0, 'outro': 1}, - 'crash': {'intro': 0, 'build': 1, 'drop': 1, 'break': 0, 'outro': 0}, - 'sub_bass': {'intro': 0, 'build': 2, 'drop': 4, 'break': 1, 'outro': 1}, - 'bass': {'intro': 0, 'build': 2, 'drop': 4, 'break': 1, 'outro': 1}, - 'drone': {'intro': 2, 'build': 2, 'drop': 2, 'break': 3, 'outro': 2}, - 'chords': {'intro': 0, 'build': 2, 'drop': 3, 'break': 2, 'outro': 1}, - 'stab': {'intro': 0, 'build': 2, 'drop': 4, 'break': 1, 'outro': 0}, - 'pad': {'intro': 2, 'build': 2, 'drop': 2, 'break': 3, 'outro': 2}, - 'pluck': {'intro': 0, 'build': 2, 'drop': 3, 'break': 0, 'outro': 0}, - 'arp': {'intro': 0, 'build': 2, 'drop': 3, 'break': 1, 'outro': 0}, - 'lead': {'intro': 0, 
'build': 1, 'drop': 4, 'break': 0, 'outro': 0}, - 'counter': {'intro': 0, 'build': 1, 'drop': 3, 'break': 1, 'outro': 0}, - 'reverse_fx': {'intro': 0, 'build': 2, 'drop': 1, 'break': 1, 'outro': 0}, - 'riser': {'intro': 0, 'build': 3, 'drop': 1, 'break': 2, 'outro': 0}, - 'impact': {'intro': 0, 'build': 2, 'drop': 1, 'break': 1, 'outro': 0}, - 'atmos': {'intro': 2, 'build': 1, 'drop': 1, 'break': 3, 'outro': 2}, - 'vocal': {'intro': 0, 'build': 1, 'drop': 2, 'break': 1, 'outro': 0}, -} - -# ROLE_MIX: Perfil de mezcla por rol -# Valores base que luego se calibran con ROLE_GAIN_CALIBRATION -# Volumenes calibrados relativos: kick = 0%, otros debajo -# Pan y sends optimizados para profundidad y espacio -ROLE_MIX = { - 'sc_trigger': {'volume': 0.0, 'pan': 0.0, 'sends': {'space': 0.0, 'echo': 0.0, 'heat': 0.0, 'glue': 0.0}}, - # DRUMS - Kick centered, elements below - 'kick': {'volume': 0.85, 'pan': 0.0, 'sends': {'space': 0.0, 'echo': 0.0, 'heat': 0.0, 'glue': 0.08}}, - 'clap': {'volume': 0.78, 'pan': 0.0, 'sends': {'space': 0.14, 'echo': 0.04, 'heat': 0.02, 'glue': 0.10}}, - 'snare_fill': {'volume': 0.72, 'pan': 0.0, 'sends': {'space': 0.12, 'echo': 0.10, 'heat': 0.01, 'glue': 0.06}}, - 'hat_closed': {'volume': 0.68, 'pan': -0.10, 'sends': {'space': 0.04, 'echo': 0.03, 'heat': 0.0, 'glue': 0.04}}, - 'hat_open': {'volume': 0.65, 'pan': 0.12, 'sends': {'space': 0.10, 'echo': 0.08, 'heat': 0.01, 'glue': 0.06}}, - 'top_loop': {'volume': 0.62, 'pan': -0.16, 'sends': {'space': 0.06, 'echo': 0.12, 'heat': 0.0, 'glue': 0.08}}, - 'perc': {'volume': 0.70, 'pan': 0.20, 'sends': {'space': 0.10, 'echo': 0.14, 'heat': 0.02, 'glue': 0.10}}, - 'tom_fill': {'volume': 0.68, 'pan': 0.12, 'sends': {'space': 0.12, 'echo': 0.10, 'heat': 0.01, 'glue': 0.06}}, - 'ride': {'volume': 0.58, 'pan': 0.24, 'sends': {'space': 0.04, 'echo': 0.03, 'heat': 0.0, 'glue': 0.06}}, - 'crash': {'volume': 0.50, 'pan': 0.0, 'sends': {'space': 0.18, 'echo': 0.06, 'heat': 0.01, 'glue': 0.02}}, - # BASS - Below 
drums, centered for mono compatibility - 'sub_bass': {'volume': 0.80, 'pan': 0.0, 'sends': {'space': 0.0, 'echo': 0.0, 'heat': 0.0, 'glue': 0.14}}, - 'bass': {'volume': 0.78, 'pan': 0.0, 'sends': {'space': 0.01, 'echo': 0.01, 'heat': 0.04, 'glue': 0.12}}, - # MUSIC - Layers below rhythm section - 'drone': {'volume': 0.55, 'pan': 0.0, 'sends': {'space': 0.28, 'echo': 0.08, 'heat': 0.02, 'glue': 0.04}}, - 'chords': {'volume': 0.70, 'pan': -0.06, 'sends': {'space': 0.18, 'echo': 0.12, 'heat': 0.01, 'glue': 0.08}}, - 'stab': {'volume': 0.65, 'pan': 0.10, 'sends': {'space': 0.12, 'echo': 0.10, 'heat': 0.04, 'glue': 0.08}}, - 'pad': {'volume': 0.60, 'pan': -0.14, 'sends': {'space': 0.32, 'echo': 0.08, 'heat': 0.0, 'glue': 0.06}}, - 'pluck': {'volume': 0.68, 'pan': 0.14, 'sends': {'space': 0.08, 'echo': 0.18, 'heat': 0.01, 'glue': 0.06}}, - 'arp': {'volume': 0.65, 'pan': -0.18, 'sends': {'space': 0.14, 'echo': 0.24, 'heat': 0.01, 'glue': 0.08}}, - 'lead': {'volume': 0.72, 'pan': 0.06, 'sends': {'space': 0.14, 'echo': 0.18, 'heat': 0.03, 'glue': 0.10}}, - 'counter': {'volume': 0.62, 'pan': 0.20, 'sends': {'space': 0.18, 'echo': 0.14, 'heat': 0.01, 'glue': 0.06}}, - # FX - Deep in the mix - 'reverse_fx': {'volume': 0.52, 'pan': 0.0, 'sends': {'space': 0.24, 'echo': 0.10, 'heat': 0.03, 'glue': 0.02}}, - 'riser': {'volume': 0.60, 'pan': 0.0, 'sends': {'space': 0.28, 'echo': 0.14, 'heat': 0.04, 'glue': 0.03}}, - 'impact': {'volume': 0.55, 'pan': 0.0, 'sends': {'space': 0.22, 'echo': 0.12, 'heat': 0.01, 'glue': 0.03}}, - 'atmos': {'volume': 0.50, 'pan': -0.20, 'sends': {'space': 0.34, 'echo': 0.06, 'heat': 0.0, 'glue': 0.03}}, - # VOCAL - Present but under drums - 'vocal': {'volume': 0.70, 'pan': 0.08, 'sends': {'space': 0.20, 'echo': 0.24, 'heat': 0.02, 'glue': 0.10}}, -} - -ARRANGEMENT_PROFILES = ( - { - 'name': 'warehouse', - 'genres': {'techno', 'tech-house'}, - 'drum_tightness': 1.15, - 'bass_motion': 'locked', - 'melodic_motion': 'restrained', - 'pan_width': 0.12, - 
'fx_bias': 1.0, - }, - { - 'name': 'jackin', - 'genres': {'house', 'tech-house'}, - 'drum_tightness': 0.96, - 'bass_motion': 'bouncy', - 'melodic_motion': 'call_response', - 'pan_width': 0.16, - 'fx_bias': 0.92, - }, - { - 'name': 'festival', - 'genres': {'trance', 'house', 'tech-house'}, - 'drum_tightness': 0.92, - 'bass_motion': 'lifted', - 'melodic_motion': 'anthemic', - 'pan_width': 0.2, - 'fx_bias': 1.18, - }, - { - 'name': 'swing', - 'genres': {'tech-house', 'house'}, - 'drum_tightness': 0.9, - 'bass_motion': 'syncopated', - 'melodic_motion': 'hooky', - 'pan_width': 0.22, - 'fx_bias': 1.05, - }, - { - 'name': 'tech-house-club', - 'genres': {'tech-house'}, - 'drum_tightness': 0.94, - 'bass_motion': 'bouncy', - 'melodic_motion': 'hooky', - 'pan_width': 0.18, - 'fx_bias': 1.08, - 'bus_names': { - 'drums': 'DRUM CLUB', - 'bass': 'BASS TUBE', - 'music': 'MUSIC JACK', - 'vocal': 'VOCAL LATIN BUS', - 'fx': 'FX JAM', - }, - 'return_names': { - 'space': 'REVERB SHORT', - 'echo': 'DELAY MONO', - 'heat': 'DRIVE HOT', - 'glue': 'GLUE BUS', - }, - }, - { - 'name': 'tech-house-deep', - 'genres': {'tech-house'}, - 'drum_tightness': 1.02, - 'bass_motion': 'locked', - 'melodic_motion': 'restrained', - 'pan_width': 0.14, - 'fx_bias': 0.88, - 'bus_names': { - 'drums': 'DRUM DEEP', - 'bass': 'SUB DEEP', - 'music': 'ATMOS DEEP', - 'vocal': 'VOX DEEP', - 'fx': 'FX DEEP', - }, - 'return_names': { - 'space': 'REVERB DEEP', - 'echo': 'DELAY DEEP', - 'heat': 'SATURATE DEEP', - 'glue': 'GLUE MINIMAL', - }, - }, - { - 'name': 'tech-house-funky', - 'genres': {'tech-house'}, - 'drum_tightness': 0.86, - 'bass_motion': 'syncopated', - 'melodic_motion': 'hooky', - 'pan_width': 0.24, - 'fx_bias': 1.12, - 'bus_names': { - 'drums': 'DRUM GROOVE', - 'bass': 'BASS FUNK', - 'music': 'MUSIC GROOVE', - 'vocal': 'VOCAL FUNK', - 'fx': 'FX SWING', - }, - 'return_names': { - 'space': 'REVERB GROOVE', - 'echo': 'DELAY GROOVE', - 'heat': 'DRIVE FUNK', - 'glue': 'GLUE SWING', - }, - }, -) - -ROLE_FX_CHAINS 
= { - 'sc_trigger': [ - {'device': 'Utility', 'parameters': {'Gain': 0.0, 'Width': 0.0}}, - ], - 'kick': [ - {'device': 'Saturator', 'parameters': {'Drive': 2.5}}, - ], - 'clap': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.08}}, - ], - 'snare_fill': [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.08}}, - ], - 'hat_closed': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 15000.0, 'Dry/Wet': 0.14}}, - ], - 'hat_open': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 12000.0, 'Dry/Wet': 0.18}}, - ], - 'top_loop': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 11000.0, 'Dry/Wet': 0.22}}, - ], - 'perc': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 9500.0, 'Dry/Wet': 0.16}}, - ], - 'ride': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 12500.0, 'Dry/Wet': 0.12}}, - ], - 'sub_bass': [ - {'device': 'Utility', 'parameters': {'Width': 0.0}}, - ], - 'bass': [ - {'device': 'Saturator', 'parameters': {'Drive': 4.0}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 7800.0, 'Dry/Wet': 0.12}}, - ], - 'drone': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.16}}, - ], - 'chords': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 9800.0, 'Dry/Wet': 0.14}}, - ], - 'stab': [ - {'device': 'Saturator', 'parameters': {'Drive': 3.0}}, - ], - 'pad': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.18}}, - ], - 'pluck': [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.12}}, - ], - 'arp': [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.16}}, - ], - 'lead': [ - {'device': 'Saturator', 'parameters': {'Drive': 2.0}}, - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.12}}, - ], - 'counter': [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.1}}, - ], - 'crash': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.16}}, - ], - 'reverse_fx': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.24}}, - ], - 'riser': [ - {'device': 'Hybrid Reverb', 'parameters': 
{'Dry/Wet': 0.28}}, - ], - 'impact': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.12}}, - ], - 'atmos': [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.3}}, - ], - 'vocal': [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.14}}, - ], -} - -SCRIPTS_ROOT = Path(__file__).resolve().parents[2] -REFERENCE_SEARCH_DIRS = ( - SCRIPTS_ROOT / 'sample', - SCRIPTS_ROOT / 'samples', -) -REFERENCE_TRACK_PROFILES = [ - { - 'name': 'Eli Brown x GeezLy - Me Gusta', - 'match_terms': ['eli brown', 'geezly', 'me gusta'], - 'genre': 'tech-house', - 'style': 'latin-industrial', - 'bpm': 136.0, - 'key': 'F#m', - 'structure': 'club', - 'reference_bars': 112, - }, - { - 'name': 'Mr. Pauer, Goyo - Química', - 'match_terms': ['mr. pauer', 'goyo', 'química'], - 'genre': 'house', - 'style': 'latin-funky vocal', - 'bpm': 123.0, - 'key': 'Cm', - 'structure': 'extended', - 'reference_bars': 72, - }, -] - -# ========================================================================= -# SECTION AUTOMATION PARAMETERS -# ========================================================================= - -SECTION_AUTOMATION = { - 'intro': { - 'energy': 0.25, - 'filters': { - 'drums': {'frequency': 8500.0, 'resonance': 0.3, 'dry_wet': 0.12}, - 'bass': {'frequency': 6200.0, 'resonance': 0.25, 'dry_wet': 0.08}, - 'music': {'frequency': 7800.0, 'resonance': 0.2, 'dry_wet': 0.1}, - 'vocal': {'frequency': 9200.0, 'resonance': 0.15, 'dry_wet': 0.06}, - 'fx': {'frequency': 8800.0, 'resonance': 0.18, 'dry_wet': 0.14}, - }, - 'reverb': {'send_level': 0.28, 'decay_time': 2.8, 'size': 0.85}, - 'delay': {'send_level': 0.18, 'feedback': 0.35, 'time_l': 0.375, 'time_r': 0.5}, - 'compression': {'threshold': -14.0, 'ratio': 2.0, 'attack': 0.015, 'release': 0.12}, - 'saturation': {'drive': 0.8, 'mix': 0.15}, - 'stereo_width': {'value': 0.92}, - 'envelope_curve': 'ease_in', - }, - 'build': { - 'energy': 0.72, - 'filters': { - 'drums': {'frequency': 4200.0, 'resonance': 0.45, 'dry_wet': 0.22}, - 'bass': 
{'frequency': 3800.0, 'resonance': 0.35, 'dry_wet': 0.16}, - 'music': {'frequency': 5400.0, 'resonance': 0.28, 'dry_wet': 0.18}, - 'vocal': {'frequency': 6800.0, 'resonance': 0.22, 'dry_wet': 0.12}, - 'fx': {'frequency': 5200.0, 'resonance': 0.32, 'dry_wet': 0.24}, - }, - 'reverb': {'send_level': 0.18, 'decay_time': 2.2, 'size': 0.72}, - 'delay': {'send_level': 0.32, 'feedback': 0.48, 'time_l': 0.375, 'time_r': 0.5}, - 'compression': {'threshold': -10.0, 'ratio': 3.5, 'attack': 0.008, 'release': 0.08}, - 'saturation': {'drive': 2.2, 'mix': 0.28}, - 'stereo_width': {'value': 1.08}, - 'envelope_curve': 'ramp_up', - }, - 'drop': { - 'energy': 1.0, - 'filters': { - 'drums': {'frequency': 14500.0, 'resonance': 0.2, 'dry_wet': 0.04}, - 'bass': {'frequency': 9800.0, 'resonance': 0.15, 'dry_wet': 0.03}, - 'music': {'frequency': 12200.0, 'resonance': 0.12, 'dry_wet': 0.05}, - 'vocal': {'frequency': 12800.0, 'resonance': 0.1, 'dry_wet': 0.04}, - 'fx': {'frequency': 11000.0, 'resonance': 0.15, 'dry_wet': 0.08}, - }, - 'reverb': {'send_level': 0.12, 'decay_time': 1.6, 'size': 0.55}, - 'delay': {'send_level': 0.14, 'feedback': 0.28, 'time_l': 0.25, 'time_r': 0.375}, - 'compression': {'threshold': -6.0, 'ratio': 4.5, 'attack': 0.005, 'release': 0.06}, - 'saturation': {'drive': 3.5, 'mix': 0.38}, - 'stereo_width': {'value': 1.18}, - 'envelope_curve': 'punch', - }, - 'break': { - 'energy': 0.38, - 'filters': { - 'drums': {'frequency': 5200.0, 'resonance': 0.55, 'dry_wet': 0.32}, - 'bass': {'frequency': 2800.0, 'resonance': 0.45, 'dry_wet': 0.24}, - 'music': {'frequency': 6400.0, 'resonance': 0.35, 'dry_wet': 0.22}, - 'vocal': {'frequency': 8200.0, 'resonance': 0.28, 'dry_wet': 0.16}, - 'fx': {'frequency': 6800.0, 'resonance': 0.38, 'dry_wet': 0.28}, - }, - 'reverb': {'send_level': 0.42, 'decay_time': 3.5, 'size': 1.0}, - 'delay': {'send_level': 0.38, 'feedback': 0.52, 'time_l': 0.5, 'time_r': 0.75}, - 'compression': {'threshold': -18.0, 'ratio': 1.8, 'attack': 0.025, 'release': 
0.18}, - 'saturation': {'drive': 0.5, 'mix': 0.1}, - 'stereo_width': {'value': 1.25}, - 'envelope_curve': 'ease_out', - }, - 'outro': { - 'energy': 0.32, - 'filters': { - 'drums': {'frequency': 6200.0, 'resonance': 0.35, 'dry_wet': 0.18}, - 'bass': {'frequency': 4200.0, 'resonance': 0.28, 'dry_wet': 0.14}, - 'music': {'frequency': 5600.0, 'resonance': 0.25, 'dry_wet': 0.16}, - 'vocal': {'frequency': 7200.0, 'resonance': 0.2, 'dry_wet': 0.1}, - 'fx': {'frequency': 6400.0, 'resonance': 0.28, 'dry_wet': 0.2}, - }, - 'reverb': {'send_level': 0.35, 'decay_time': 3.2, 'size': 0.92}, - 'delay': {'send_level': 0.28, 'feedback': 0.42, 'time_l': 0.375, 'time_r': 0.5}, - 'compression': {'threshold': -12.0, 'ratio': 2.2, 'attack': 0.018, 'release': 0.15}, - 'saturation': {'drive': 0.6, 'mix': 0.12}, - 'stereo_width': {'value': 0.98}, - 'envelope_curve': 'ease_out', - }, -} - -# Envelope curve templates for automation interpolation -ENVELOPE_CURVES = { - 'linear': lambda x: x, - 'ease_in': lambda x: x * x, - 'ease_out': lambda x: 1 - (1 - x) ** 2, - 'ease_in_out': lambda x: 3 * x * x - 2 * x * x * x, - 'ramp_up': lambda x: x ** 0.5, - 'ramp_down': lambda x: 1 - (1 - x) ** 2, - 'punch': lambda x: min(1.0, x * 2.0) if x < 0.5 else 1.0 - (1.0 - x) ** 0.5, - 's_curve': lambda x: 1 / (1 + (2.71828 ** (-10 * (x - 0.5)))), - 'exponential': lambda x: (2.71828 ** (x - 1) - 0.3679) / 0.6321, -} - -# ============================================================================= -# AUTOMATIZACION DE DEVICES POR SECCION - FASE 2 -# Parametros especificos por device para cada tipo de seccion -# ============================================================================= - -# Automatizacion de devices en tracks individuales por rol - ENHANCED -SECTION_DEVICE_AUTOMATION = { - # BASS - Filtros, drive y compresion dinamica - 'bass': { - 'Saturator': { - 'Drive': {'intro': 1.5, 'build': 3.5, 'drop': 5.0, 'break': 2.0, 'outro': 1.8}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.22, 'drop': 0.30, 
'break': 0.15, 'outro': 0.10}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6200.0, 'build': 8500.0, 'drop': 12000.0, 'break': 4800.0, 'outro': 5800.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.18, 'drop': 0.12, 'break': 0.22, 'outro': 0.06}, - 'Resonance': {'intro': 0.25, 'build': 0.35, 'drop': 0.20, 'break': 0.40, 'outro': 0.28}, - }, - 'Compressor': { - 'Threshold': {'intro': -12.0, 'build': -14.0, 'drop': -18.0, 'break': -10.0, 'outro': -11.0}, - 'Ratio': {'intro': 2.5, 'build': 3.0, 'drop': 4.0, 'break': 2.0, 'outro': 2.2}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0}, - }, - }, - 'sub_bass': { - 'Saturator': { - 'Drive': {'intro': 1.0, 'build': 2.5, 'drop': 4.0, 'break': 1.5, 'outro': 1.2}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 5200.0, 'build': 7200.0, 'drop': 10000.0, 'break': 4200.0, 'outro': 4800.0}, - 'Dry/Wet': {'intro': 0.05, 'build': 0.10, 'drop': 0.06, 'break': 0.14, 'outro': 0.04}, - }, - 'Utility': { - 'Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0}, - 'Gain': {'intro': 0.0, 'build': 0.2, 'drop': 0.4, 'break': -0.2, 'outro': 0.0}, - }, - }, - # PAD - Filtros envolventes con width y reverb - 'pad': { - 'Auto Filter': { - 'Frequency': {'intro': 4500.0, 'build': 8000.0, 'drop': 11000.0, 'break': 3200.0, 'outro': 4000.0}, - 'Dry/Wet': {'intro': 0.25, 'build': 0.18, 'drop': 0.12, 'break': 0.35, 'outro': 0.28}, - 'Resonance': {'intro': 0.20, 'build': 0.28, 'drop': 0.15, 'break': 0.35, 'outro': 0.22}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.22, 'build': 0.16, 'drop': 0.10, 'break': 0.28, 'outro': 0.24}, - 'Decay Time': {'intro': 3.5, 'build': 2.8, 'drop': 2.0, 'break': 4.2, 'outro': 3.8}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.85, 'build': 1.02, 'drop': 1.12, 'break': 1.25, 'outro': 0.90}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.5, 'drop': 2.5, 'break': 0.6, 'outro': 0.7}, - 'Dry/Wet': {'intro': 0.10, 'build': 
0.15, 'drop': 0.20, 'break': 0.08, 'outro': 0.12}, - }, - }, - # ATMOS - Filtros espaciales con movement - 'atmos': { - 'Auto Filter': { - 'Frequency': {'intro': 3800.0, 'build': 7200.0, 'drop': 9800.0, 'break': 2800.0, 'outro': 3500.0}, - 'Dry/Wet': {'intro': 0.30, 'build': 0.22, 'drop': 0.15, 'break': 0.40, 'outro': 0.32}, - 'Resonance': {'intro': 0.22, 'build': 0.32, 'drop': 0.18, 'break': 0.42, 'outro': 0.25}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.35, 'build': 0.28, 'drop': 0.18, 'break': 0.42, 'outro': 0.38}, - 'Decay Time': {'intro': 4.0, 'build': 3.2, 'drop': 2.2, 'break': 5.0, 'outro': 4.5}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.70, 'build': 0.88, 'drop': 1.05, 'break': 1.20, 'outro': 0.75}, - }, - }, - # FX ELEMENTS - 'reverse_fx': { - 'Auto Filter': { - 'Frequency': {'intro': 5200.0, 'build': 9000.0, 'drop': 12000.0, 'break': 6000.0, 'outro': 4800.0}, - 'Dry/Wet': {'intro': 0.20, 'build': 0.28, 'drop': 0.15, 'break': 0.35, 'outro': 0.22}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.30, 'build': 0.35, 'drop': 0.20, 'break': 0.40, 'outro': 0.28}, - 'Decay Time': {'intro': 3.0, 'build': 4.5, 'drop': 2.5, 'break': 5.5, 'outro': 3.5}, - }, - 'Saturator': { - 'Drive': {'intro': 1.2, 'build': 2.8, 'drop': 4.5, 'break': 1.8, 'outro': 1.0}, - }, - }, - 'riser': { - 'Auto Filter': { - 'Frequency': {'intro': 4000.0, 'build': 10000.0, 'drop': 14000.0, 'break': 5500.0, 'outro': 4200.0}, - 'Dry/Wet': {'intro': 0.15, 'build': 0.30, 'drop': 0.12, 'break': 0.22, 'outro': 0.18}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.25, 'build': 0.40, 'drop': 0.22, 'break': 0.35, 'outro': 0.20}, - 'Decay Time': {'intro': 2.5, 'build': 5.0, 'drop': 3.0, 'break': 4.0, 'outro': 2.8}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.18, 'build': 0.35, 'drop': 0.15, 'break': 0.25, 'outro': 0.15}, - 'Feedback': {'intro': 0.30, 'build': 0.55, 'drop': 0.25, 'break': 0.45, 'outro': 0.28}, - }, - 'Saturator': { - 'Drive': {'intro': 1.5, 'build': 4.0, 'drop': 
3.0, 'break': 2.5, 'outro': 1.2}, - }, - }, - 'impact': { - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.15, 'build': 0.18, 'drop': 0.12, 'break': 0.20, 'outro': 0.14}, - 'Decay Time': {'intro': 2.0, 'build': 2.5, 'drop': 1.8, 'break': 3.0, 'outro': 2.2}, - }, - 'Saturator': { - 'Drive': {'intro': 1.8, 'build': 2.5, 'drop': 3.5, 'break': 2.0, 'outro': 1.5}, - }, - }, - 'drone': { - 'Auto Filter': { - 'Frequency': {'intro': 3000.0, 'build': 6500.0, 'drop': 9000.0, 'break': 2500.0, 'outro': 2800.0}, - 'Dry/Wet': {'intro': 0.20, 'build': 0.15, 'drop': 0.10, 'break': 0.30, 'outro': 0.22}, - 'Resonance': {'intro': 0.25, 'build': 0.35, 'drop': 0.22, 'break': 0.40, 'outro': 0.28}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.18, 'build': 0.14, 'drop': 0.08, 'break': 0.25, 'outro': 0.20}, - 'Decay Time': {'intro': 4.5, 'build': 3.5, 'drop': 2.5, 'break': 5.5, 'outro': 4.8}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.8, 'break': 0.6, 'outro': 0.7}, - }, - }, - # HATS - Filtros de brillantez con resonance y saturacion - 'hat_closed': { - 'Auto Filter': { - 'Frequency': {'intro': 12000.0, 'build': 14000.0, 'drop': 16000.0, 'break': 10000.0, 'outro': 11000.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.16, 'drop': 0.10, 'break': 0.20, 'outro': 0.14}, - 'Resonance': {'intro': 0.15, 'build': 0.25, 'drop': 0.12, 'outro': 0.18, 'break': 0.30}, - }, - 'Saturator': { - 'Drive': {'intro': 0.5, 'build': 1.2, 'drop': 1.8, 'break': 0.8, 'outro': 0.6}, - }, - }, - 'hat_open': { - 'Auto Filter': { - 'Frequency': {'intro': 9000.0, 'build': 11000.0, 'drop': 13000.0, 'break': 7500.0, 'outro': 8500.0}, - 'Dry/Wet': {'intro': 0.18, 'build': 0.22, 'drop': 0.14, 'break': 0.28, 'outro': 0.20}, - 'Resonance': {'intro': 0.18, 'build': 0.28, 'drop': 0.15, 'outro': 0.20, 'break': 0.35}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.22, 'outro': 0.12}, - }, - }, - 'top_loop': { - 'Auto Filter': { - 'Frequency': {'intro': 8500.0, 
'build': 10500.0, 'drop': 12500.0, 'break': 7000.0, 'outro': 8000.0}, - 'Dry/Wet': {'intro': 0.20, 'build': 0.25, 'drop': 0.16, 'break': 0.32, 'outro': 0.22}, - 'Resonance': {'intro': 0.12, 'build': 0.22, 'drop': 0.14, 'outro': 0.15, 'break': 0.28}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.05, 'build': 0.12, 'drop': 0.08, 'break': 0.18, 'outro': 0.10}, - }, - }, - # SYNTHS - 'chords': { - 'Auto Filter': { - 'Frequency': {'intro': 5500.0, 'build': 8500.0, 'drop': 11000.0, 'break': 4000.0, 'outro': 5000.0}, - 'Dry/Wet': {'intro': 0.15, 'build': 0.20, 'drop': 0.12, 'break': 0.28, 'outro': 0.18}, - 'Resonance': {'intro': 0.18, 'build': 0.28, 'drop': 0.15, 'outro': 0.20, 'break': 0.35}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.08, 'break': 0.22, 'outro': 0.12}, - 'Feedback': {'intro': 0.25, 'build': 0.40, 'drop': 0.30, 'break': 0.45, 'outro': 0.28}, - }, - 'Saturator': { - 'Drive': {'intro': 1.2, 'build': 2.2, 'drop': 3.5, 'break': 1.5, 'outro': 1.0}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.95, 'build': 1.05, 'drop': 1.15, 'break': 1.25, 'outro': 1.00}, - }, - }, - 'lead': { - 'Saturator': { - 'Drive': {'intro': 1.0, 'build': 2.5, 'drop': 4.0, 'break': 1.5, 'outro': 1.2}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.20, 'drop': 0.25, 'break': 0.10, 'outro': 0.15}, - }, - 'Echo': { - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.18, 'outro': 0.10}, - 'Feedback': {'intro': 0.20, 'build': 0.35, 'drop': 0.28, 'break': 0.40, 'outro': 0.22}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12500.0, 'break': 4500.0, 'outro': 5500.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.20, 'outro': 0.12}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.90, 'build': 1.02, 'drop': 1.10, 'break': 1.18, 'outro': 0.95}, - }, - }, - 'stab': { - 'Saturator': { - 'Drive': {'intro': 2.0, 'build': 3.5, 'drop': 5.0, 'break': 2.5, 'outro': 2.2}, - 'Dry/Wet': {'intro': 0.18, 'build': 
0.25, 'drop': 0.30, 'break': 0.15, 'outro': 0.20}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 9000.0, 'drop': 12000.0, 'break': 5000.0, 'outro': 5500.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.15, 'drop': 0.08, 'break': 0.22, 'outro': 0.12}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.88, 'build': 1.00, 'drop': 1.12, 'break': 1.20, 'outro': 0.92}, - }, - }, - 'pluck': { - 'Echo': { - 'Dry/Wet': {'intro': 0.12, 'build': 0.22, 'drop': 0.14, 'break': 0.28, 'outro': 0.15}, - 'Feedback': {'intro': 0.30, 'build': 0.45, 'drop': 0.35, 'break': 0.50, 'outro': 0.32}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 7000.0, 'build': 10000.0, 'drop': 13000.0, 'break': 5500.0, 'outro': 6500.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.10, 'break': 0.20, 'outro': 0.12}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.8, 'break': 1.2, 'outro': 0.9}, - }, - }, - 'arp': { - 'Echo': { - 'Dry/Wet': {'intro': 0.15, 'build': 0.28, 'drop': 0.18, 'break': 0.35, 'outro': 0.18}, - 'Feedback': {'intro': 0.35, 'build': 0.50, 'drop': 0.40, 'break': 0.58, 'outro': 0.38}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6500.0, 'build': 9500.0, 'drop': 12500.0, 'break': 5000.0, 'outro': 6000.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.18, 'drop': 0.14, 'break': 0.25, 'outro': 0.15}, - }, - 'Saturator': { - 'Drive': {'intro': 0.6, 'build': 1.5, 'drop': 2.5, 'break': 1.0, 'outro': 0.7}, - }, - }, - 'counter': { - 'Echo': { - 'Dry/Wet': {'intro': 0.10, 'build': 0.18, 'drop': 0.12, 'break': 0.22, 'outro': 0.12}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 8800.0, 'drop': 11500.0, 'break': 4800.0, 'outro': 5200.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.16, 'drop': 0.12, 'break': 0.22, 'outro': 0.14}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.75, 'build': 0.92, 'drop': 1.08, 'break': 1.15, 'outro': 0.80}, - }, - }, - # VOCAL - 'vocal': { - 'Echo': { - 'Dry/Wet': {'intro': 0.12, 'build': 0.25, 'drop': 
0.15, 'break': 0.30, 'outro': 0.14}, - 'Feedback': {'intro': 0.25, 'build': 0.42, 'drop': 0.30, 'break': 0.48, 'outro': 0.28}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.06, 'break': 0.18, 'outro': 0.10}, - 'Decay Time': {'intro': 2.5, 'build': 3.5, 'drop': 2.0, 'break': 4.0, 'outro': 2.8}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 9000.0, 'drop': 11000.0, 'break': 5000.0, 'outro': 5500.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.16, 'drop': 0.10, 'break': 0.20, 'outro': 0.12}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.8, 'drop': 2.5, 'break': 1.2, 'outro': 0.9}, - }, - }, - # DRUMS - Sin automatizacion de devices (manejados por volumen/sends) - 'kick': {}, - 'clap': {}, - 'snare_fill': {}, - 'perc': {}, - 'ride': {}, - 'tom_fill': {}, - 'crash': {}, - 'sc_trigger': {}, -} - -# Automatizacion de devices en BUSES por seccion - ENHANCED -BUS_DEVICE_AUTOMATION = { - 'drums': { - 'Compressor': { - 'Threshold': {'intro': -14.0, 'build': -16.0, 'drop': -18.5, 'break': -12.0, 'outro': -13.5}, - 'Ratio': {'intro': 2.5, 'build': 3.0, 'drop': 4.0, 'break': 2.2, 'outro': 2.4}, - 'Attack': {'intro': 0.015, 'build': 0.010, 'drop': 0.005, 'break': 0.020, 'outro': 0.018}, - }, - 'Saturator': { - 'Drive': {'intro': 0.8, 'build': 1.5, 'drop': 2.5, 'break': 1.0, 'outro': 0.9}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.22, 'break': 0.10, 'outro': 0.10}, - }, - 'Limiter': { - 'Gain': {'intro': 0.2, 'build': 0.3, 'drop': 0.5, 'break': 0.15, 'outro': 0.18}, - }, - 'AutoFilter': { - 'Frequency': {'intro': 8500.0, 'build': 12500.0, 'drop': 16000.0, 'break': 4500.0, 'outro': 6500.0}, - 'Dry/Wet': {'intro': 0.10, 'build': 0.22, 'drop': 0.04, 'break': 0.35, 'outro': 0.18}, - 'Resonance': {'intro': 0.20, 'build': 0.12, 'drop': 0.08, 'break': 0.50, 'outro': 0.28}, - }, - }, - 'bass': { - 'Saturator': { - 'Drive': {'intro': 1.0, 'build': 2.0, 'drop': 3.5, 'break': 1.5, 'outro': 1.2}, - 'Dry/Wet': {'intro': 
0.10, 'build': 0.18, 'drop': 0.25, 'break': 0.12, 'outro': 0.10}, - }, - 'Compressor': { - 'Threshold': {'intro': -15.0, 'build': -17.0, 'drop': -20.0, 'break': -14.0, 'outro': -14.5}, - 'Ratio': {'intro': 3.0, 'build': 3.5, 'drop': 4.5, 'break': 2.8, 'outro': 3.0}, - 'Attack': {'intro': 0.020, 'build': 0.015, 'drop': 0.008, 'break': 0.025, 'outro': 0.022}, - }, - 'Utility': { - 'Stereo Width': {'intro': 0.0, 'build': 0.0, 'drop': 0.0, 'break': 0.0, 'outro': 0.0}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 4800.0, 'build': 8500.0, 'drop': 12000.0, 'break': 3200.0, 'outro': 4200.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.05, 'break': 0.25, 'outro': 0.12}, - 'Resonance': {'intro': 0.18, 'build': 0.12, 'drop': 0.08, 'break': 0.45, 'outro': 0.22}, - }, - }, - 'music': { - 'Compressor': { - 'Threshold': {'intro': -19.0, 'build': -20.0, 'drop': -22.0, 'break': -18.0, 'outro': -18.5}, - 'Ratio': {'intro': 2.0, 'build': 2.5, 'drop': 3.0, 'break': 1.8, 'outro': 2.0}, - 'Attack': {'intro': 0.025, 'build': 0.020, 'drop': 0.015, 'break': 0.030, 'outro': 0.028}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 7500.0, 'build': 12000.0, 'drop': 16000.0, 'break': 4500.0, 'outro': 6000.0}, - 'Dry/Wet': {'intro': 0.12, 'build': 0.18, 'drop': 0.03, 'break': 0.30, 'outro': 0.15}, - 'Resonance': {'intro': 0.18, 'build': 0.10, 'drop': 0.06, 'break': 0.40, 'outro': 0.22}, - }, - 'Utility': { - 'Stereo Width': {'intro': 1.02, 'build': 1.08, 'drop': 1.12, 'break': 1.25, 'outro': 1.05}, - }, - 'Saturator': { - 'Drive': {'intro': 0.3, 'build': 0.8, 'drop': 1.5, 'break': 0.4, 'outro': 0.35}, - 'Dry/Wet': {'intro': 0.05, 'build': 0.10, 'drop': 0.15, 'break': 0.08, 'outro': 0.06}, - }, - }, - 'vocal': { - 'Echo': { - 'Dry/Wet': {'intro': 0.06, 'build': 0.12, 'drop': 0.05, 'break': 0.18, 'outro': 0.08}, - 'Feedback': {'intro': 0.25, 'build': 0.42, 'drop': 0.28, 'break': 0.50, 'outro': 0.30}, - }, - 'Compressor': { - 'Threshold': {'intro': -16.0, 'build': -17.0, 'drop': 
-19.0, 'break': -15.0, 'outro': -15.5}, - 'Ratio': {'intro': 2.8, 'build': 3.2, 'drop': 3.8, 'break': 2.5, 'outro': 2.7}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.06, 'build': 0.10, 'drop': 0.03, 'break': 0.16, 'outro': 0.08}, - 'Decay Time': {'intro': 2.2, 'build': 3.0, 'drop': 1.6, 'break': 4.0, 'outro': 2.5}, - }, - 'Auto Filter': { - 'Frequency': {'intro': 8000.0, 'build': 11500.0, 'drop': 14500.0, 'break': 6000.0, 'outro': 7200.0}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.12, 'drop': 0.04, 'break': 0.22, 'outro': 0.10}, - 'Resonance': {'intro': 0.15, 'build': 0.10, 'drop': 0.06, 'break': 0.32, 'outro': 0.18}, - }, - }, - 'fx': { - 'Auto Filter': { - 'Frequency': {'intro': 6000.0, 'build': 10500.0, 'drop': 14000.0, 'break': 4000.0, 'outro': 5200.0}, - 'Dry/Wet': {'intro': 0.15, 'build': 0.20, 'drop': 0.06, 'outro': 0.18, 'break': 0.35}, - 'Resonance': {'intro': 0.18, 'build': 0.15, 'drop': 0.10, 'break': 0.42, 'outro': 0.22}, - }, - 'Hybrid Reverb': { - 'Dry/Wet': {'intro': 0.20, 'build': 0.25, 'drop': 0.10, 'break': 0.38, 'outro': 0.22}, - 'Decay Time': {'intro': 3.0, 'build': 3.8, 'drop': 2.0, 'break': 5.0, 'outro': 3.5}, - }, - 'Limiter': { - 'Gain': {'intro': -0.3, 'build': 0.0, 'drop': 0.2, 'break': -0.5, 'outro': -0.2}, - }, - 'Saturator': { - 'Drive': {'intro': 0.5, 'build': 1.5, 'drop': 2.2, 'break': 0.8, 'outro': 0.6}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.14, 'drop': 0.20, 'break': 0.10, 'outro': 0.10}, - }, - }, -} - -# Automatizacion de devices en MASTER por seccion - ENHANCED -MASTER_DEVICE_AUTOMATION = { - 'Utility': {'Stereo Width': {'intro': 1.04, 'build': 1.08, 'drop': 1.10, 'break': 1.12, 'outro': 1.06}, - 'Gain': {'intro': 0.72, 'build': 0.88, 'drop': 1.0, 'break': 0.68, 'outro': 0.70}, - }, - 'Saturator': {'Drive': {'intro': 0.18, 'build': 0.30, 'drop': 0.45, 'break': 0.12, 'outro': 0.15}, - 'Dry/Wet': {'intro': 0.08, 'build': 0.15, 'drop': 0.22, 'break': 0.06, 'outro': 0.10}, - }, - 'Compressor': {'Ratio': {'intro': 0.55, 
'build': 0.62, 'drop': 0.68, 'break': 0.50, 'outro': 0.52}, - 'Threshold': {'intro': -10.0, 'build': -12.0, 'drop': -13.5, 'break': -8.0, 'outro': -9.0}, - 'Attack': {'intro': 0.020, 'build': 0.015, 'drop': 0.010, 'break': 0.025, 'outro': 0.022}, - 'Release': {'intro': 0.15, 'build': 0.12, 'drop': 0.10, 'break': 0.18, 'outro': 0.16}, - }, - 'Limiter': {'Gain': {'intro': 1.05, 'build': 1.12, 'drop': 1.20, 'break': 1.00, 'outro': 1.02}, - 'Ceiling': {'intro': -0.5, 'build': -0.7, 'drop': -0.9, 'break': -0.4, 'outro': -0.45}, - }, - 'Auto Filter': {'Frequency': {'intro': 8500.0, 'build': 12000.0, 'drop': 16000.0, 'break': 5500.0, 'outro': 7500.0}, - 'Dry/Wet': {'intro': 0.04, 'build': 0.02, 'drop': 0.01, 'break': 0.06, 'outro': 0.05}, - }, - 'Echo': {'Dry/Wet': {'intro': 0.02, 'build': 0.05, 'drop': 0.03, 'break': 0.07, 'outro': 0.03}, - 'Feedback': {'intro': 0.15, 'build': 0.25, 'drop': 0.18, 'break': 0.30, 'outro': 0.20}, - }, -} - -DEVICE_PARAMETER_SAFETY_CLAMPS = { - 'Drive': {'min': 0.0, 'max': 6.0}, - 'Frequency': {'min': 20.0, 'max': 20000.0}, - 'Dry/Wet': {'min': 0.0, 'max': 1.0}, - 'Feedback': {'min': 0.0, 'max': 0.7}, - 'Stereo Width': {'min': 0.0, 'max': 1.3}, - 'Resonance': {'min': 0.0, 'max': 1.0}, - 'Ratio': {'min': 1.0, 'max': 20.0}, - 'Threshold': {'min': -60.0, 'max': 0.0}, - 'Attack': {'min': 0.0001, 'max': 0.5}, - 'Release': {'min': 0.001, 'max': 2.0}, - 'Gain': {'min': -1.0, 'max': 1.8}, - 'Decay Time': {'min': 0.1, 'max': 10.0}, -} - -MASTER_SAFETY_CLAMPS = { - 'Stereo Width': {'min': 0.0, 'max': 1.25}, - 'Drive': {'min': 0.0, 'max': 1.5}, - 'Ratio': {'min': 0.45, 'max': 0.9}, - 'Gain': {'min': 0.0, 'max': 1.6}, - 'Attack': {'min': 0.0001, 'max': 0.1}, - 'Ceiling': {'min': -3.0, 'max': 0.0}, - 'Threshold': {'min': -20.0, 'max': 0.0}, - 'Release': {'min': 0.001, 'max': 1.0}, -} - -# Expanded configuration de variación por sección -SECTION_VARIATION_CONFIG = { - 'perc': { - 'intro': {'sparse': True, 'intensity': 0.3, 'variant': 'ghost'}, - 'build': 
{'building': True, 'intensity': 0.8, 'variant': 'layering'}, - 'drop': {'full': True, 'intensity': 1.0, 'variant': 'layered'}, - 'break': {'sparse': True, 'intensity': 0.4, 'variant': 'minimal'}, - 'outro': {'fading': True, 'intensity': 0.3, 'variant': 'strip_down'}, - }, - 'perc_alt': { - 'intro': {'sparse': True, 'intensity': 0.2, 'variant': 'minimal'}, - 'build': {'building': True, 'intensity': 0.6, 'variant': 'tension'}, - 'drop': {'full': True, 'intensity': 0.7, 'variant': 'groove'}, - 'break': {'sparse': True, 'intensity': 0.3, 'variant': 'atmos'}, - 'outro': {'fading': True, 'intensity': 0.2, 'variant': 'minimal'}, - }, - 'top_loop': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'building': True, 'intensity': 0.8, 'variant': 'energy'}, - 'drop': {'full': True, 'intensity': 1.0, 'variant': 'full'}, - 'break': {'sparse': True, 'intensity': 0.4, 'variant': 'filtered'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'hat_open': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'building': True, 'intensity': 0.7, 'variant': 'tease'}, - 'drop': {'full': True, 'intensity': 0.9, 'variant': 'offbeat'}, - 'break': {'sparse': True, 'intensity': 0.3, 'variant': 'filtered'}, - 'outro': {'fading': True, 'intensity': 0.4, 'variant': 'fading'}, - }, - 'ride': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'building': True, 'intensity': 0.6, 'variant': 'building'}, - 'drop': {'full': True, 'intensity': 0.8, 'variant': 'full'}, - 'break': {'sparse': True, 'intensity': 0.3, 'variant': 'sparse'}, - 'outro': {'fading': True, 'intensity': 0.4, 'variant': 'minimal'}, - }, - 'snare_fill': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'tension': True, 'intensity': 0.8, 'variant': 'rolling'}, - 'drop': {'impact': True, 'intensity': 0.6, 'variant': 'fill'}, - 'break': {'sparse': True, 'intensity': 0.5, 'variant': 'tension'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'tom_fill': { - 'intro': {'use': False, 
'variant': 'absent'}, - 'build': {'rising': True, 'intensity': 0.7, 'variant': 'rising'}, - 'drop': {'impact': True, 'intensity': 0.5, 'variant': 'fill'}, - 'break': {'use': False, 'variant': 'absent'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'vocal_shot': { - 'intro': {'sparse': True, 'variant': 'hint'}, - 'build': {'building': True, 'variant': 'anticipate'}, - 'drop': {'full': True, 'variant': 'hook'}, - 'break': {'sparse': True, 'variant': 'filtered'}, - 'outro': {'fading': True, 'variant': 'minimal'}, - }, - 'synth_peak': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'building': True, 'variant': 'rising'}, - 'drop': {'full': True, 'variant': 'anthem'}, - 'break': {'use': False, 'variant': 'absent'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'atmos': { - 'intro': {'full': True, 'decay': 'long', 'variant': 'atmospheric'}, - 'build': {'building': True, 'variant': 'tension'}, - 'drop': {'sparse': True, 'variant': 'minimal'}, - 'break': {'full': True, 'decay': 'long', 'variant': 'ethereal'}, - 'outro': {'fading': True, 'decay': 'long', 'variant': 'fading'}, - }, - 'chords': { - 'intro': {'sparse': True, 'variant': 'foreshadow'}, - 'build': {'building': True, 'variant': 'rising'}, - 'drop': {'full': True, 'variant': 'full'}, - 'break': {'sparse': True, 'variant': 'atmospheric'}, - 'outro': {'fading': True, 'variant': 'echo'}, - }, - 'pad': { - 'intro': {'full': True, 'variant': 'atmospheric'}, - 'build': {'building': True, 'variant': 'tension'}, - 'drop': {'sparse': True, 'variant': 'minimal'}, - 'break': {'full': True, 'variant': 'ethereal'}, - 'outro': {'fading': True, 'variant': 'decay'}, - }, - 'lead': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'building': True, 'variant': 'rising'}, - 'drop': {'full': True, 'variant': 'hook'}, - 'break': {'sparse': True, 'variant': 'minimal'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'arp': { - 'intro': {'sparse': True, 'variant': 'ghost'}, - 'build': 
{'building': True, 'variant': 'energy'}, - 'drop': {'full': True, 'variant': 'driving'}, - 'break': {'sparse': True, 'variant': 'filtered'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'pluck': { - 'intro': {'sparse': True, 'variant': 'hint'}, - 'build': {'building': True, 'variant': 'tension'}, - 'drop': {'full': True, 'variant': 'punchy'}, - 'break': {'sparse': True, 'variant': 'minimal'}, - 'outro': {'fading': True, 'variant': 'strip_down'}, - }, - 'bass': { - 'intro': {'sparse': True, 'variant': 'subtle'}, - 'build': {'building': True, 'variant': 'rising'}, - 'drop': {'full': True, 'variant': 'groove'}, - 'break': {'sparse': True, 'variant': 'filtered'}, - 'outro': {'fading': True, 'variant': 'fading'}, - }, - 'sub_bass': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'building': True, 'variant': 'hint'}, - 'drop': {'full': True, 'variant': 'deep'}, - 'break': {'sparse': True, 'variant': 'minimal'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, - 'stab': { - 'intro': {'use': False, 'variant': 'absent'}, - 'build': {'sparse': True, 'variant': 'hint'}, - 'drop': {'full': True, 'variant': 'impact'}, - 'break': {'use': False, 'variant': 'absent'}, - 'outro': {'use': False, 'variant': 'absent'}, - }, -} - -# ============================================================================= -# DRUM PATTERN BANKS - Expanded variants for section-specific patterns -# ============================================================================= - -# Section-specific drum variants - EXPANDED for variation -DRUM_SECTION_VARIANTS = { - 'intro': { - 'kick': ['sparse', 'minimal', 'foreshadow', 'hint'], - 'clap': ['absent', 'hint'], - 'hat_closed': ['sparse', 'ghost', 'whisper'], - 'hat_open': ['absent', 'hint'], - 'perc': ['minimal', 'atmos', 'ghost'], - 'ride': ['absent'], - 'top_loop': ['absent', 'hint'], - 'snare_fill': ['absent'], - 'tom_fill': ['absent'], - }, - 'build': { - 'kick': ['building', 'pressure', 'rising', 'tension'], - 'clap': 
['building', 'anticipate', 'roll_in'], - 'hat_closed': ['building', 'open_up', 'hyper'], - 'hat_open': ['building', 'tease'], - 'perc': ['layering', 'tension', 'build_up'], - 'ride': ['building', 'rising'], - 'top_loop': ['building', 'energy'], - 'snare_fill': ['rolling', 'tension'], - 'tom_fill': ['rising', 'fill'], - }, - 'drop': { - 'kick': ['full', 'punch', 'four_on_floor', 'groove', 'impact'], - 'clap': ['full', 'backbeat', 'syncopated', 'punch'], - 'hat_closed': ['full', 'groove', 'offbeat', 'shuffle'], - 'hat_open': ['full', 'offbeat', 'groove'], - 'perc': ['full', 'layered', 'groove', 'latin', 'tribal'], - 'ride': ['full', 'groove', 'energy'], - 'top_loop': ['full', 'energy', 'layered'], - 'snare_fill': ['drop_hit', 'fill'], - 'tom_fill': ['drop_hit', 'fill'], - }, - 'break': { - 'kick': ['sparse', 'absent', 'minimal', 'foreshadow'], - 'clap': ['sparse', 'offbeat', 'ghost'], - 'hat_closed': ['open', 'sparse', 'atmos', 'filtered'], - 'hat_open': ['sparse', 'filtered'], - 'perc': ['minimal', 'atmos', 'filtered'], - 'ride': ['sparse', 'filtered'], - 'top_loop': ['filtered', 'hint'], - 'snare_fill': ['tension'], - 'tom_fill': ['tension'], - }, - 'outro': { - 'kick': ['fading', 'minimal', 'sparse', 'strip_down'], - 'clap': ['fading', 'sparse', 'last_hit'], - 'hat_closed': ['fading', 'open', 'minimal'], - 'hat_open': ['fading', 'last_hit'], - 'perc': ['fading', 'minimal', 'strip_down'], - 'ride': ['fading', 'minimal'], - 'top_loop': ['fading', 'minimal'], - 'snare_fill': ['end_fill', 'absent'], - 'tom_fill': ['end_fill', 'absent'], - }, -} - -# Expanded drum pattern generators for section variation -DRUM_PATTERN_BANKS = { - 'kick': { - 'four_on_floor': [0.0, 1.0, 2.0, 3.0], - 'sparse': [0.0, 2.0], - 'minimal': [0.0], - 'foreshadow': [0.0, 3.5], - 'hint': [0.0, 2.5], - 'building': [0.0, 1.0, 2.0, 3.0, 3.5], - 'pressure': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'rising': [0.0, 1.0, 2.0, 2.75, 3.0, 3.25, 3.5, 3.75], - 'tension': [0.0, 0.25, 1.0, 1.5, 2.0, 2.75, 
3.0, 3.25, 3.5], - 'full': [0.0, 1.0, 2.0, 3.0], - 'punch': [0.0, 0.25, 1.0, 2.0, 3.0], - 'groove': [0.0, 0.75, 1.0, 1.75, 2.0, 2.75, 3.0, 3.75], - 'impact': [0.0, 0.25, 0.5, 1.0, 2.0, 3.0], - 'fading': [0.0, 2.0], - 'strip_down': [0.0], - 'absent': [], - }, - 'clap': { - 'backbeat': [1.0, 3.0], - 'sparse': [1.0], - 'hint': [3.0], - 'building': [1.0, 2.5, 3.0], - 'anticipate': [1.0, 2.0, 2.75, 3.0, 3.5], - 'roll_in': [0.75, 1.0, 1.25, 1.5, 2.75, 3.0, 3.25, 3.5], - 'full': [1.0, 3.0], - 'syncopated': [0.75, 1.0, 2.75, 3.0], - 'offbeat': [1.5, 3.5], - 'punch': [0.75, 1.0, 1.25, 2.75, 3.0, 3.25], - 'ghost': [3.0], - 'last_hit': [1.0], - 'fading': [1.0], - 'absent': [], - }, - 'hat_closed': { - 'offbeat': [0.5, 1.5, 2.5, 3.5], - 'sparse': [0.5, 2.5], - 'ghost': [0.25, 1.25, 2.25, 3.25], - 'whisper': [0.75, 1.75, 2.75, 3.75], - 'building': [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'open_up': [0.5, 0.75, 1.5, 1.75, 2.5, 2.75, 3.5, 3.75], - 'hyper': [0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25, 3.5, 3.75], - 'full': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'groove': [0.0, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'shuffle': [0.0, 0.33, 0.66, 1.0, 1.33, 1.66, 2.0, 2.33, 2.66, 3.0, 3.33, 3.66], - 'filtered': [0.5, 1.5, 2.5, 3.5], - 'energy': [0.0, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'fading': [0.5, 2.5], - 'minimal': [0.5], - }, - 'hat_open': { - 'sparse': [2.0], - 'building': [1.5, 2.5, 3.0], - 'full': [0.0, 2.0], - 'offbeat': [1.5, 3.5], - 'tease': [3.5], - 'fading': [2.0], - 'last_hit': [3.5], - 'hint': [2.0], - 'absent': [], - }, - 'perc': { - 'minimal': [1.5], - 'atmos': [0.75, 2.75], - 'ghost': [0.25, 2.25], - 'layering': [0.5, 1.5, 2.5, 3.5], - 'tension': [0.25, 1.25, 2.25, 3.25], - 'build_up': [0.5, 1.0, 2.0, 3.0, 3.5], - 'full': [0.5, 1.0, 1.5, 2.5, 3.0, 3.5], - 'layered': [0.25, 0.75, 1.25, 1.75, 2.25, 2.75, 3.25, 3.75], - 'groove': [0.5, 1.0, 2.0, 2.5, 3.5], - 'latin': [0.25, 0.75, 1.25, 1.75, 2.25, 2.75, 3.25, 3.75], - 
'tribal': [0.0, 0.5, 1.25, 1.75, 2.5, 3.0, 3.75], - 'filtered': [0.5, 2.5], - 'fading': [1.5], - 'strip_down': [0.0], - 'hint': [2.0], - }, - 'ride': { - 'sparse': [0.0, 2.0], - 'building': [0.0, 1.0, 2.0, 3.0], - 'rising': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], - 'full': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'groove': [0.0, 0.25, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'energy': [0.0, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.25, 3.5], - 'filtered': [0.0, 2.0], - 'fading': [0.0], - 'minimal': [0.0], - 'absent': [], - }, - 'top_loop': { - 'minimal': [0.25, 1.25, 2.25, 3.25], - 'energy': [0.0, 0.25, 0.5, 1.0, 1.25, 1.5, 2.0, 2.25, 2.5, 3.0, 3.25, 3.5], - 'building': [0.25, 0.75, 1.25, 1.75, 2.25, 2.75, 3.25, 3.75], - 'full': [0.0, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'layered': [0.25, 0.5, 0.75, 1.25, 1.5, 1.75, 2.25, 2.5, 2.75, 3.25, 3.5, 3.75], - 'filtered': [0.5, 1.5, 2.5, 3.5], - 'fading': [0.5, 2.5], - 'hint': [1.5, 3.5], - 'absent': [], - }, - 'snare_fill': { - 'rolling': [2.0, 2.125, 2.25, 2.375, 2.5, 2.625, 2.75, 2.875, 3.0, 3.125, 3.25, 3.375, 3.5, 3.625, 3.75, 3.875], - 'tension': [3.0, 3.125, 3.25, 3.375, 3.5, 3.625, 3.75, 3.875], - 'drop_hit': [0.0], - 'fill': [3.0, 3.25, 3.5, 3.75], - 'end_fill': [0.0, 0.25, 0.5, 0.75], - 'absent': [], - }, - 'tom_fill': { - 'rising': [3.0, 3.2, 3.4, 3.6, 3.8], - 'fill': [3.0, 3.125, 3.25, 3.375, 3.5], - 'drop_hit': [0.0], - 'tension': [3.5, 3.625, 3.75, 3.875], - 'end_fill': [0.0, 0.2, 0.4, 0.6], - 'absent': [], - }, -} - -# Section-specific bass variants - EXPANDED -BASS_SECTION_VARIANTS = { - 'intro': ['subtle', 'hint', 'foreshadow', 'ghost', 'minimal'], - 'build': ['rising', 'tension', 'anticipate', 'building', 'pressure'], - 'drop': ['full', 'punch', 'groove', 'deep', 'impact', 'energy', 'rolling'], - 'break': ['sparse', 'minimal', 'atmos', 'filtered', 'foreshadow'], - 'outro': ['fading', 'minimal', 'subtle', 'strip_down'], -} - -# Expanded bass pattern templates (relative positions in 4-bar cycle) 
-BASS_PATTERN_BANKS = { - 'anchor': { - 'positions': [0.0, 1.0, 2.0, 3.0], - 'durations': [0.5, 0.5, 0.5, 0.5], - 'style': 'root_heavy' - }, - 'subtle': { - 'positions': [0.0, 2.0], - 'durations': [0.3, 0.3], - 'style': 'minimal' - }, - 'hint': { - 'positions': [0.0, 3.5], - 'durations': [0.25, 0.25], - 'style': 'foreshadow' - }, - 'foreshadow': { - 'positions': [0.0, 1.0, 3.0, 3.5], - 'durations': [0.4, 0.3, 0.4, 0.3], - 'style': 'building' - }, - 'ghost': { - 'positions': [0.5, 2.5], - 'durations': [0.2, 0.2], - 'style': 'minimal' - }, - 'rising': { - 'positions': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'durations': [0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.5, 0.4], - 'style': 'ascending' - }, - 'tension': { - 'positions': [0.0, 0.75, 1.5, 2.25, 3.0, 3.5], - 'durations': [0.5, 0.25, 0.5, 0.25, 0.5, 0.3], - 'style': 'syncopated' - }, - 'anticipate': { - 'positions': [0.0, 1.0, 2.0, 2.75, 3.0, 3.25, 3.5], - 'durations': [0.5, 0.5, 0.4, 0.2, 0.4, 0.2, 0.4], - 'style': 'building' - }, - 'building': { - 'positions': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.25, 3.5, 3.75], - 'durations': [0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.3, 0.2, 0.3, 0.2], - 'style': 'ascending' - }, - 'pressure': { - 'positions': [0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.25, 3.5, 3.75], - 'durations': [0.3, 0.2, 0.3, 0.2, 0.4, 0.4, 0.4, 0.4, 0.3, 0.2, 0.3, 0.2], - 'style': 'intense' - }, - 'full': { - 'positions': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'durations': [0.5, 0.4, 0.5, 0.4, 0.5, 0.4, 0.5, 0.4], - 'style': 'groove' - }, - 'punch': { - 'positions': [0.0, 0.25, 1.0, 2.0, 3.0], - 'durations': [0.6, 0.2, 0.5, 0.5, 0.5], - 'style': 'punchy' - }, - 'groove': { - 'positions': [0.0, 0.25, 0.75, 1.0, 1.75, 2.0, 2.75, 3.0, 3.5], - 'durations': [0.4, 0.2, 0.3, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3], - 'style': 'syncopated' - }, - 'deep': { - 'positions': [0.0, 1.0, 2.0, 3.0], - 'durations': [0.8, 0.8, 0.8, 0.8], - 'style': 'sub' - }, - 'impact': { - 'positions': [0.0, 0.5, 1.5, 2.0, 3.0, 3.5], - 
'durations': [0.6, 0.4, 0.3, 0.5, 0.5, 0.4], - 'style': 'punchy' - }, - 'energy': { - 'positions': [0.0, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'durations': [0.4, 0.25, 0.4, 0.5, 0.4, 0.5, 0.4, 0.5, 0.4], - 'style': 'driving' - }, - 'rolling': { - 'positions': [0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25, 3.5, 3.75], - 'durations': [0.2, 0.15, 0.2, 0.15, 0.2, 0.15, 0.2, 0.15, 0.2, 0.15, 0.2, 0.15, 0.2, 0.15, 0.2, 0.15], - 'style': 'rolling' - }, - 'sparse': { - 'positions': [0.0, 2.0], - 'durations': [0.4, 0.4], - 'style': 'minimal' - }, - 'minimal': { - 'positions': [0.0], - 'durations': [0.3], - 'style': 'hint' - }, - 'atmos': { - 'positions': [0.0, 3.0], - 'durations': [0.6, 0.4], - 'style': 'atmospheric' - }, - 'filtered': { - 'positions': [0.0, 1.5, 2.5], - 'durations': [0.4, 0.3, 0.3], - 'style': 'filtered' - }, - 'fading': { - 'positions': [0.0, 2.0], - 'durations': [0.5, 0.3], - 'style': 'decay' - }, - 'strip_down': { - 'positions': [0.0], - 'durations': [0.25], - 'style': 'minimal' - }, - 'bounce': { - 'positions': [0.0, 0.5, 1.5, 2.0, 2.5, 3.5], - 'durations': [0.4, 0.3, 0.4, 0.4, 0.3, 0.4], - 'style': 'bouncy' - }, - 'syncopated': { - 'positions': [0.25, 0.75, 1.25, 1.75, 2.25, 2.75, 3.25, 3.75], - 'durations': [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2], - 'style': 'offbeat' - }, -} - -# Pattern variant diversity memory - track used variants across generations -_pattern_variant_memory: Dict[str, Dict[str, int]] = { - 'drum': {}, - 'bass': {}, - 'melodic': {}, -} - -def _get_pattern_variant_penalty(category: str, variant: str) -> float: - """Get penalty for a pattern variant based on cross-generation usage.""" - if variant in _pattern_variant_memory.get(category, {}): - count = _pattern_variant_memory[category].get(variant, 0) - return min(0.4, count * 0.08) - return 0.0 - -def _record_pattern_variant_usage(category: str, variant: str) -> None: - """Record that a pattern variant was used.""" - if category not in 
_pattern_variant_memory: - _pattern_variant_memory[category] = {} - _pattern_variant_memory[category][variant] = _pattern_variant_memory[category].get(variant, 0) + 1 - -def _decay_pattern_variant_memory() -> None: - """Decay pattern variant memory to allow reuse after generations.""" - for category in _pattern_variant_memory: - for variant in list(_pattern_variant_memory[category].keys()): - _pattern_variant_memory[category][variant] = max(0, _pattern_variant_memory[category][variant] - 1) - if _pattern_variant_memory[category][variant] <= 0: - del _pattern_variant_memory[category][variant] - -def reset_pattern_variant_memory() -> None: - """Reset all pattern variant memory.""" - global _pattern_variant_memory - _pattern_variant_memory = {'drum': {}, 'bass': {}, 'melodic': {}} - -# Expanded fill patterns for section transitions -FILL_PATTERNS = { - 'drum_fill_4bar': { - 'roles': ['snare', 'kick', 'hat'], - 'pattern': { - 'snare': [3.0, 3.25, 3.5, 3.75], - 'kick': [3.5], - 'hat': [3.0, 3.5] - }, - 'velocities': {'snare': 100, 'kick': 90, 'hat': 70} - }, - 'drum_fill_2bar': { - 'roles': ['snare', 'hat'], - 'pattern': { - 'snare': [1.5, 1.75], - 'hat': [1.5] - }, - 'velocities': {'snare': 95, 'hat': 65} - }, - 'snare_roll': { - 'roles': ['snare'], - 'pattern': { - 'snare': [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5, 1.625, 1.75, 1.875] - }, - 'velocities': {'snare': 85} - }, - 'hat_open_build': { - 'roles': ['hat_open'], - 'pattern': { - 'hat_open': [0.0, 0.5, 1.0, 1.5, 2.0, 2.25, 2.5, 2.75, 3.0, 3.125, 3.25, 3.375, 3.5, 3.625, 3.75, 3.875] - }, - 'velocities': {'hat_open': 75} - }, - 'kick_drop': { - 'roles': ['kick'], - 'pattern': { - 'kick': [0.0] - }, - 'velocities': {'kick': 127} - }, - 'crash_impact': { - 'roles': ['crash'], - 'pattern': { - 'crash': [0.0] - }, - 'velocities': {'crash': 100} - }, - 'snare_roll_build': { - 'roles': ['snare', 'hat'], - 'pattern': { - 'snare': [2.0, 2.25, 2.5, 2.75, 3.0, 3.125, 3.25, 3.375, 
3.5, 3.625, 3.75, 3.875], - 'hat': [2.0, 2.5, 3.0, 3.5] - }, - 'velocities': {'snare': 88, 'hat': 70} - }, - 'tom_build': { - 'roles': ['tom_fill'], - 'pattern': { - 'tom_fill': [2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8] - }, - 'velocities': {'tom_fill': 90} - }, - 'full_impact': { - 'roles': ['kick', 'snare', 'crash'], - 'pattern': { - 'kick': [0.0], - 'snare': [0.0, 0.25], - 'crash': [0.0] - }, - 'velocities': {'kick': 127, 'snare': 110, 'crash': 105} - }, - 'hat_tension': { - 'roles': ['hat_closed'], - 'pattern': { - 'hat_closed': [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5, 1.625, 1.75, 1.875] - }, - 'velocities': {'hat_closed': 72} - }, - 'percussion_fill': { - 'roles': ['perc'], - 'pattern': { - 'perc': [0.5, 0.75, 1.25, 1.5, 2.0, 2.5, 3.0, 3.5] - }, - 'velocities': {'perc': 78} - }, - 'minimal_drop': { - 'roles': ['kick'], - 'pattern': { - 'kick': [0.0] - }, - 'velocities': {'kick': 120} - }, - 'build_tension': { - 'roles': ['snare', 'hat_closed', 'kick'], - 'pattern': { - 'snare': [2.5, 2.75, 3.0, 3.25, 3.5, 3.75], - 'hat_closed': [2.0, 2.5, 3.0, 3.5], - 'kick': [0.0] - }, - 'velocities': {'snare': 92, 'hat_closed': 68, 'kick': 95} - }, - 'outro_fade': { - 'roles': ['hat_closed', 'perc'], - 'pattern': { - 'hat_closed': [0.0, 0.5, 1.0], - 'perc': [0.25, 0.75, 1.25] - }, - 'velocities': {'hat_closed': 80, 'perc': 70} - }, -} - -# Expanded transition events between sections -TRANSITION_EVENTS = { - ('intro', 'build'): ['hat_tension', 'hat_open_build'], - ('build', 'drop'): ['full_impact', 'crash_impact', 'kick_drop', 'snare_roll_build'], - ('drop', 'break'): ['drum_fill_4bar', 'percussion_fill'], - ('break', 'build'): ['hat_tension', 'hat_open_build'], - ('break', 'drop'): ['crash_impact', 'kick_drop', 'full_impact'], - ('drop', 'outro'): ['drum_fill_2bar', 'outro_fade'], - ('outro', 'end'): ['minimal_drop'], -} - -# Rules for preventing transition overcrowding -TRANSITION_DENSITY_RULES = { - # Max fills per section 
kind - 'max_fills_by_section': { - 'intro': 1, # Minimal fills in intro - 'build': 3, # More fills for tension - 'drop': 2, # Moderate fills - 'break': 2, # Sparse - 'outro': 1, # Minimal - }, - - # Events that should not stack together - 'exclusive_events': [ - {'crash_impact', 'kick_drop'}, # Don't stack impact events - {'drum_fill_4bar', 'snare_roll'}, # Choose one drum fill - ], - - # Minimum distance between same-type fills (in beats) - 'min_distance_same_type': { - 'crash_impact': 8.0, - 'kick_drop': 16.0, - 'snare_roll': 4.0, - } -} - -# Section-specific melodic variants - EXPANDED -MELODIC_SECTION_VARIANTS = { - 'intro': ['subtle', 'foreshadow', 'atmospheric', 'ghost', 'hint'], - 'build': ['rising', 'tension', 'anticipate', 'building', 'energy'], - 'drop': ['hook', 'anthem', 'full', 'punchy', 'impact', 'driving'], - 'break': ['sparse', 'minimal', 'ethereal', 'filtered', 'atmospheric'], - 'outro': ['fading', 'echo', 'minimal', 'strip_down', 'decay'], -} - -# Expanded melodic pattern templates -MELODIC_PATTERN_BANKS = { - 'motif': { - 'intervals': [0, 4, 7, 0], - 'rhythm': [0.0, 0.5, 1.0, 1.5], - 'durations': [0.4, 0.3, 0.4, 0.3], - 'style': 'repeating' - }, - 'subtle': { - 'intervals': [0, 0], - 'rhythm': [0.0, 2.0], - 'durations': [0.3, 0.3], - 'style': 'minimal' - }, - 'foreshadow': { - 'intervals': [0, 4, 0], - 'rhythm': [0.0, 1.0, 3.5], - 'durations': [0.4, 0.3, 0.5], - 'style': 'hint' - }, - 'atmospheric': { - 'intervals': [0, 2, 4, 5, 7], - 'rhythm': [0.0, 0.8, 1.6, 2.4, 3.2], - 'durations': [0.8, 0.7, 0.6, 0.5, 0.4], - 'style': 'pad' - }, - 'ghost': { - 'intervals': [0, 7], - 'rhythm': [0.5, 2.5], - 'durations': [0.2, 0.2], - 'style': 'minimal' - }, - 'hint': { - 'intervals': [0, 5], - 'rhythm': [0.0, 3.0], - 'durations': [0.25, 0.25], - 'style': 'minimal' - }, - 'rising': { - 'intervals': [0, 2, 4, 5, 7, 9, 11, 12], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'durations': [0.4, 0.35, 0.4, 0.35, 0.4, 0.35, 0.5, 0.4], - 'style': 'ascending' 
- }, - 'tension': { - 'intervals': [0, 1, 0, 1, 2, 1, 0], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], - 'durations': [0.3, 0.2, 0.3, 0.2, 0.3, 0.2, 0.5], - 'style': 'chromatic' - }, - 'anticipate': { - 'intervals': [0, 4, 7, 9, 12], - 'rhythm': [0.0, 1.0, 2.0, 3.0, 3.75], - 'durations': [0.5, 0.4, 0.5, 0.3, 0.5], - 'style': 'buildup' - }, - 'building': { - 'intervals': [0, 2, 4, 5, 7, 9, 11], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.75, 3.5], - 'durations': [0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.5], - 'style': 'ascending' - }, - 'energy': { - 'intervals': [0, 4, 7, 9, 12, 14], - 'rhythm': [0.0, 0.25, 0.75, 1.25, 2.0, 2.75], - 'durations': [0.3, 0.25, 0.3, 0.25, 0.4, 0.5], - 'style': 'driving' - }, - 'hook': { - 'intervals': [0, 4, 7, 4, 0, 4, 7, 12], - 'rhythm': [0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - 'durations': [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3], - 'style': 'catchy' - }, - 'anthem': { - 'intervals': [0, 4, 7, 12, 11, 7, 4, 0], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'durations': [0.4, 0.4, 0.4, 0.5, 0.4, 0.4, 0.4, 0.5], - 'style': 'big' - }, - 'full': { - 'intervals': [0, 4, 7, 5, 4, 2, 0], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], - 'durations': [0.4, 0.3, 0.4, 0.3, 0.4, 0.3, 0.5], - 'style': 'melodic' - }, - 'punchy': { - 'intervals': [0, 7, 0, 12], - 'rhythm': [0.0, 0.25, 0.5, 0.75], - 'durations': [0.15, 0.15, 0.15, 0.2], - 'style': 'staccato' - }, - 'impact': { - 'intervals': [0, 5, 7, 12, 7, 5], - 'rhythm': [0.0, 0.5, 0.75, 1.5, 2.25, 3.0], - 'durations': [0.4, 0.25, 0.3, 0.5, 0.3, 0.4], - 'style': 'driving' - }, - 'driving': { - 'intervals': [0, 4, 7, 4, 0, 4, 5, 7], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], - 'durations': [0.35, 0.35, 0.35, 0.35, 0.35, 0.35, 0.35, 0.4], - 'style': 'repeating' - }, - 'sparse': { - 'intervals': [0, 7], - 'rhythm': [0.0, 2.0], - 'durations': [0.4, 0.4], - 'style': 'minimal' - }, - 'minimal': { - 'intervals': [0], - 'rhythm': [0.0], - 'durations': [0.3], - 'style': 'single' - }, 
- 'ethereal': { - 'intervals': [0, 7, 12, 7], - 'rhythm': [0.0, 1.5, 2.5, 3.5], - 'durations': [1.0, 0.8, 1.0, 0.8], - 'style': 'pad' - }, - 'filtered': { - 'intervals': [0, 4, 7, 5], - 'rhythm': [0.0, 1.0, 2.0, 3.0], - 'durations': [0.5, 0.4, 0.5, 0.4], - 'style': 'filtered' - }, - 'fading': { - 'intervals': [0, 4, 0], - 'rhythm': [0.0, 1.0, 2.0], - 'durations': [0.5, 0.4, 0.3], - 'style': 'decay' - }, - 'echo': { - 'intervals': [0, 0, 0], - 'rhythm': [0.0, 0.5, 1.0], - 'durations': [0.3, 0.25, 0.2], - 'style': 'repeat' - }, - 'response': { - 'intervals': [7, 4, 0], - 'rhythm': [0.5, 1.5, 2.5], - 'durations': [0.3, 0.3, 0.4], - 'style': 'call_response' - }, - 'lift': { - 'intervals': [0, 4, 7, 12, 14, 16], - 'rhythm': [0.0, 0.5, 1.0, 1.5, 2.0, 2.5], - 'durations': [0.3, 0.3, 0.3, 0.4, 0.3, 0.4], - 'style': 'ascending' - }, - 'strip_down': { - 'intervals': [0], - 'rhythm': [0.0], - 'durations': [0.25], - 'style': 'minimal' - }, - 'decay': { - 'intervals': [0, 7, 5, 3], - 'rhythm': [0.0, 1.0, 2.0, 3.0], - 'durations': [0.5, 0.4, 0.3, 0.2], - 'style': 'descending' - }, - 'call_response': { - 'intervals': [0, 4, 7, 0, 7, 4], - 'rhythm': [0.0, 0.25, 0.5, 1.5, 2.0, 2.5], - 'durations': [0.25, 0.2, 0.3, 0.35, 0.25, 0.3], - 'style': 'call_response' - }, -} - -# ============================================================================= -# MASTER CHAIN AUTOMATION TARGETS -# ============================================================================= - - -@dataclass -class StyleConfig: - """Configuración de estilo musical""" - genre: str - bpm: float - key: str - scale: str - density: str # minimal, normal, busy - complexity: str # simple, moderate, complex - - -class SongGenerator: - """Generador de configuraciones y patrones musicales""" - - def __init__(self): - self.logger = logging.getLogger("SongGenerator") - self._current_generation_profile = { - 'name': 'default', - 'seed': 0, - 'drum_tightness': 1.0, - 'bass_motion': 'locked', - 'melodic_motion': 'restrained', - 
'pan_width': 0.12, - 'fx_bias': 1.0, - } - # Track style adjustments and calibrated volumes for this generation - self._style_adjustments_applied = [] - self._calibrated_bus_volumes = {} - # Tracking for ROLE_GAIN_CALIBRATION overrides - self._gain_calibration_overrides_count = 0 - self._peak_reductions_count = 0 - self._master_profile_used = 'default' - - # ========================================================================= - # UTILIDADES MUSICALES - # ========================================================================= - - def note_name_to_midi(self, note_name: str, octave: int = 3) -> int: - """Convierte nombre de nota a número MIDI""" - note_name = note_name.replace('b', '#').replace('Db', 'C#').replace('Eb', 'D#') - note_name = note_name.replace('Gb', 'F#').replace('Ab', 'G#').replace('Bb', 'A#') - - try: - note_idx = NOTE_NAMES.index(note_name.upper()) - return (octave + 1) * 12 + note_idx - except ValueError: - return 60 # Default C4 - - def midi_to_note_name(self, midi_note: int) -> tuple: - """Convierte MIDI a (nota, octava)""" - octave = (midi_note // 12) - 1 - note_name = NOTE_NAMES[midi_note % 12] - return note_name, octave - - def get_scale_notes(self, root_note: Union[int, str], scale_name: str = 'minor') -> List[int]: - """Obtiene las notas de una escala""" - if isinstance(root_note, str): - root_midi = self.note_name_to_midi(root_note) - else: - root_midi = root_note - - scale_intervals = SCALES.get(scale_name, SCALES['minor']) - return [root_midi + interval for interval in scale_intervals] - - def quantize_to_scale(self, note: int, scale_notes: List[int]) -> int: - """Cuantiza una nota a la escala más cercana""" - if note in scale_notes: - return note - return min(scale_notes, key=lambda x: abs(x - note)) - - # ========================================================================= - # GENERACIÓN DE CONFIGURACIONES - # ========================================================================= - - def _make_note(self, pitch: int, start: 
float, duration: float, velocity: int) -> Dict[str, Any]: - return { - 'pitch': max(0, min(127, int(pitch))), - 'start': round(float(start), 3), - 'duration': round(max(0.05, float(duration)), 3), - 'velocity': max(1, min(127, int(velocity))), - } - - def _repeat_pattern(self, pattern: List[Dict[str, Any]], total_length: float, pattern_length: float = 4.0) -> List[Dict[str, Any]]: - if not pattern or total_length <= 0 or pattern_length <= 0: - return [] - - notes = [] - repeats = max(1, int(round(total_length / pattern_length))) - for repeat_index in range(repeats): - offset = repeat_index * pattern_length - for note in pattern: - start = float(note['start']) + offset - if start >= total_length: - continue - duration = min(float(note['duration']), total_length - start) - notes.append(self._make_note(note['pitch'], start, duration, note['velocity'])) - return notes - - def _section_rng(self, section: Dict[str, Any], role: str, salt: int = 0) -> random.Random: - base_seed = int(self._current_generation_profile.get('seed', 0)) - section_index = int(section.get('index', 0)) - role_fingerprint = sum((index + 1) * ord(char) for index, char in enumerate(str(role))) - return random.Random(base_seed + (section_index * 1009) + (role_fingerprint * 17) + (salt * 7919)) - - def _clamp_pan(self, value: float) -> float: - return round(max(-1.0, min(1.0, float(value))), 3) - - def _clamp_unit(self, value: float) -> float: - return round(max(0.0, min(1.0, float(value))), 3) - - def _apply_swing(self, notes: List[Dict[str, Any]], amount: float, section_length: float) -> List[Dict[str, Any]]: - if not notes or abs(amount) < 0.001: - return notes - - swung = [] - for note in notes: - start = float(note['start']) - fractional = round(start % 1.0, 3) - if 0.001 < fractional < 0.999: - shift = amount if fractional >= 0.5 else (amount * -0.45) - start = min(max(0.0, start + shift), max(0.0, section_length - 0.05)) - swung.append(self._make_note(note['pitch'], start, note['duration'], 
note['velocity'])) - swung.sort(key=lambda item: (item['start'], item['pitch'])) - return swung - - def _apply_density_mask(self, notes: List[Dict[str, Any]], section: Dict[str, Any], role: str, - keep_probability: float) -> List[Dict[str, Any]]: - if not notes or keep_probability >= 0.995: - return notes - - rng = self._section_rng(section, role, salt=3) - filtered = [] - for note in notes: - start = float(note['start']) - if abs(start % 1.0) < 0.001: - filtered.append(note) - continue - if rng.random() <= keep_probability: - filtered.append(note) - return filtered or notes[:1] - - def _build_arrangement_profile(self, genre: str, style: str, variant_seed: int) -> Dict[str, Any]: - style_text = "{} {}".format(genre, style).lower() - candidates = [profile for profile in ARRANGEMENT_PROFILES if genre in set(profile.get('genres', ()))] - - if 'latin' in style_text: - candidates = [profile for profile in ARRANGEMENT_PROFILES if profile['name'] in ['swing', 'jackin']] or candidates - elif 'industrial' in style_text: - candidates = [profile for profile in ARRANGEMENT_PROFILES if profile['name'] in ['warehouse', 'festival']] or candidates - - if not candidates: - candidates = list(ARRANGEMENT_PROFILES) - - rng = random.Random(int(variant_seed) + 41) - selected = dict(rng.choice(candidates)) - selected['seed'] = int(variant_seed) - return selected - - def _extend_parallel_sends(self, role: str, sends: Dict[str, Any]) -> Dict[str, Any]: - resolved = dict(sends or {}) - if role in ['kick', 'clap', 'hat_closed', 'hat_open', 'top_loop', 'perc', 'ride', 'snare_fill', 'tom_fill']: - resolved.setdefault('glue', 0.1) - resolved.setdefault('heat', 0.05) - elif role in ['sub_bass', 'bass', 'stab']: - resolved.setdefault('glue', 0.08) - resolved.setdefault('heat', 0.08) - elif role in ['chords', 'pad', 'pluck', 'arp', 'lead', 'counter', 'vocal']: - resolved.setdefault('glue', 0.04) - elif role in ['reverse_fx', 'riser', 'impact', 'atmos', 'drone', 'crash']: - 
resolved.setdefault('glue', 0.03) - return resolved - - def _resolve_bus_for_role(self, role: str) -> Optional[str]: - return ROLE_BUS_ASSIGNMENTS.get(str(role or '').strip().lower(), 'music') - - def _get_section_variation(self, role: str, section_kind: str) -> Dict[str, Any]: - """ - Obtiene configuración de variación para un rol y sección. - - Retorna dict con: - - use: bool - si el rol debe usarse en esta sección - - sparse: bool - si usar variante sparse - - full: bool - si usar variante completa - - intensity: float - intensidad de 0 a 1 - - etc. - """ - if role not in SECTION_VARIATION_CONFIG: - return {'use': True, 'intensity': 1.0} - - role_config = SECTION_VARIATION_CONFIG[role] - return role_config.get(section_kind.lower(), {'use': True, 'intensity': 1.0}) - - def _should_vary_role_in_section(self, role: str, section_kind: str) -> bool: - """Determina si un rol debe variar en una sección dada.""" - if role not in SECTION_VARIATION_CONFIG: - return False - - config = self._get_section_variation(role, section_kind) - - # Si tiene clave 'use' explícita - if 'use' in config: - return config['use'] - - # Si tiene variantes específicas - return any(k in config for k in ['sparse', 'full', 'building', 'fading']) - - def _build_mix_bus_blueprint( - self, - profile: Dict[str, Any], - genre: str, - style: str, - reference_resolution: Optional[Dict[str, Any]] = None, - ) -> List[Dict[str, Any]]: - style_text = f"{genre} {style}".lower() - profile_name = str(profile.get('name', 'default')).lower() - reference_name = str(((reference_resolution or {}).get('reference') or {}).get('name', '')).lower() - - buses = [ - { - 'key': 'drums', - 'name': 'DRUM BUS', - 'color': BUS_TRACK_COLORS['drums'], - 'volume': 0.86, - 'pan': 0.0, - 'monitoring': 'in', - 'fx_chain': [ - {'device': 'Compressor', 'parameters': {'Threshold': -16.5}}, - {'device': 'Saturator', 'parameters': {'Drive': 1.2}}, - {'device': 'Utility', 'parameters': {'Gain': 0.2}}, - {'device': 'Limiter', 
'parameters': {'Gain': 0.3}}, - ], - }, - { - 'key': 'bass', - 'name': 'BASS BUS', - 'color': BUS_TRACK_COLORS['bass'], - 'volume': 0.8, - 'pan': 0.0, - 'monitoring': 'in', - 'fx_chain': [ - {'device': 'Saturator', 'parameters': {'Drive': 1.3}}, - {'device': 'Compressor', 'parameters': {'Threshold': -18.0}}, - {'device': 'Utility', 'parameters': {'Stereo Width': 0.0}}, - {'device': 'Utility', 'parameters': {'Gain': 0.2}}, - ], - }, - { - 'key': 'music', - 'name': 'MUSIC BUS', - 'color': BUS_TRACK_COLORS['music'], - 'volume': 0.8, - 'pan': 0.0, - 'monitoring': 'in', - 'fx_chain': [ - {'device': 'Compressor', 'parameters': {'Threshold': -21.0}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 12800.0, 'Dry/Wet': 0.05}}, - {'device': 'Utility', 'parameters': {'Stereo Width': 1.12}}, - {'device': 'Utility', 'parameters': {'Gain': 0.2}}, - ], - }, - { - 'key': 'vocal', - 'name': 'VOCAL BUS', - 'color': BUS_TRACK_COLORS['vocal'], - 'volume': 0.82, - 'pan': 0.0, - 'monitoring': 'in', - 'fx_chain': [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 0.05}}, - {'device': 'Compressor', 'parameters': {'Threshold': -18.0}}, - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.05}}, - {'device': 'Utility', 'parameters': {'Gain': 0.2}}, - ], - }, - { - 'key': 'fx', - 'name': 'FX BUS', - 'color': BUS_TRACK_COLORS['fx'], - 'volume': 0.76, - 'pan': 0.0, - 'monitoring': 'in', - 'fx_chain': [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 10200.0, 'Dry/Wet': 0.1}}, - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.12}}, - {'device': 'Utility', 'parameters': {'Gain': -0.2}}, - {'device': 'Limiter', 'parameters': {'Gain': 0.0}}, - ], - }, - ] - - # ========================================================================= - # Apply BUS_GAIN_CALIBRATION as safe baseline BEFORE profile overrides - # ========================================================================= - self._style_adjustments_applied = [] - self._calibrated_bus_volumes = {} - - def 
find_device_in_chain(fx_chain, device_type): - for device in fx_chain: - if device.get('device') == device_type: - return device - return None - - for bus in buses: - bus_key = bus.get('key', '') - if bus_key not in BUS_GAIN_CALIBRATION: - continue - - calibration = BUS_GAIN_CALIBRATION[bus_key] - - if 'volume' in calibration: - bus['volume'] = calibration['volume'] - - fx_chain = bus.get('fx_chain', []) - - if 'compressor_threshold' in calibration: - compressor = find_device_in_chain(fx_chain, 'Compressor') - if compressor: - compressor['parameters']['Threshold'] = calibration['compressor_threshold'] - - if 'saturator_drive' in calibration: - saturator = find_device_in_chain(fx_chain, 'Saturator') - if saturator: - saturator['parameters']['Drive'] = calibration['saturator_drive'] - - if 'limiter_gain' in calibration: - limiter = find_device_in_chain(fx_chain, 'Limiter') - if limiter: - limiter['parameters']['Gain'] = calibration['limiter_gain'] - - if 'utility_gain' in calibration: - for device in fx_chain: - if device.get('device') == 'Utility': - if 'Gain' in device.get('parameters', {}): - device['parameters']['Gain'] = calibration['utility_gain'] - break - elif 'Stereo Width' not in device.get('parameters', {}): - device['parameters']['Gain'] = calibration['utility_gain'] - break - - # ========================================================================= - # Profile-specific overrides ON TOP of calibrated baselines - # ========================================================================= - if profile_name == 'warehouse': - buses[0]['name'] = 'DRUM BUNKER' - buses[0]['fx_chain'][1]['parameters']['Drive'] = 3.1 - buses[1]['name'] = 'LOW END BUS' - buses[1]['fx_chain'][0]['parameters']['Drive'] = 4.0 - buses[2]['fx_chain'][1]['parameters']['Frequency'] = 11200.0 - elif profile_name == 'festival': - buses[2]['name'] = 'MUSIC WIDE' - buses[2]['fx_chain'][2]['parameters']['Stereo Width'] = 1.14 - buses[3]['name'] = 'VOCAL TAIL' - 
buses[3]['fx_chain'][2]['parameters']['Dry/Wet'] = 0.08 - buses[4]['name'] = 'FX WASH' - buses[4]['fx_chain'][1]['parameters']['Dry/Wet'] = 0.14 - elif profile_name == 'swing': - buses[0]['name'] = 'DRUM POCKET' - buses[0]['fx_chain'][0]['parameters']['Threshold'] = -13.5 - buses[3]['name'] = 'VOCAL SLAP' - buses[3]['fx_chain'][0]['parameters']['Dry/Wet'] = 0.12 - elif profile_name == 'jackin': - buses[0]['name'] = 'DRUM CLUB' - buses[2]['name'] = 'MUSIC JACK' - buses[3]['name'] = 'VOX CLUB' - buses[4]['name'] = 'FX JAM' - elif profile_name == 'tech-house-club': - # Club-oriented tech-house with punchy drums and latin vocal treatment - buses[0]['name'] = 'DRUM CLUB' - buses[0]['volume'] = 0.95 - buses[0]['fx_chain'][0]['parameters']['Threshold'] = -15.5 - buses[0]['fx_chain'][1]['parameters']['Drive'] = 2.2 - buses[1]['name'] = 'BASS TUBE' - buses[1]['volume'] = 0.95 - buses[1]['fx_chain'][0]['parameters']['Drive'] = 2.5 - buses[1]['fx_chain'][1]['parameters']['Threshold'] = -17.0 - buses[2]['name'] = 'MUSIC JACK' - buses[2]['volume'] = 0.95 - buses[2]['fx_chain'][2]['parameters']['Stereo Width'] = 1.16 - buses[3]['name'] = 'VOCAL LATIN BUS' - buses[3]['volume'] = 0.95 - buses[3]['fx_chain'][0]['parameters']['Dry/Wet'] = 0.10 - buses[3]['fx_chain'][2]['parameters']['Dry/Wet'] = 0.08 - buses[4]['name'] = 'FX JAM' - buses[4]['volume'] = 0.95 - buses[4]['fx_chain'][1]['parameters']['Dry/Wet'] = 0.14 - elif profile_name == 'tech-house-deep': - # Minimal deep tech-house with subtle processing - buses[0]['name'] = 'DRUM DEEP' - buses[0]['volume'] = 0.95 - buses[0]['fx_chain'][0]['parameters']['Threshold'] = -18.0 - buses[0]['fx_chain'][1]['parameters']['Drive'] = 0.8 - buses[1]['name'] = 'SUB DEEP' - buses[1]['volume'] = 0.95 - buses[1]['fx_chain'][0]['parameters']['Drive'] = 1.0 - buses[1]['fx_chain'][1]['parameters']['Threshold'] = -20.0 - buses[2]['name'] = 'ATMOS DEEP' - buses[2]['volume'] = 0.95 - buses[2]['fx_chain'][0]['parameters']['Threshold'] = -24.0 - 
buses[2]['fx_chain'][1]['parameters']['Frequency'] = 10200.0 - buses[2]['fx_chain'][2]['parameters']['Stereo Width'] = 1.08 - buses[3]['name'] = 'VOX DEEP' - buses[3]['volume'] = 0.95 - buses[3]['fx_chain'][0]['parameters']['Dry/Wet'] = 0.04 - buses[3]['fx_chain'][2]['parameters']['Dry/Wet'] = 0.06 - buses[4]['name'] = 'FX DEEP' - buses[4]['volume'] = 0.95 - buses[4]['fx_chain'][1]['parameters']['Dry/Wet'] = 0.08 - elif profile_name == 'tech-house-funky': - # Groovy tech-house with wide stereo and bouncy feel - buses[0]['name'] = 'DRUM GROOVE' - buses[0]['volume'] = 0.95 - buses[0]['fx_chain'][0]['parameters']['Threshold'] = -14.5 - buses[0]['fx_chain'][1]['parameters']['Drive'] = 1.8 - buses[1]['name'] = 'BASS FUNK' - buses[1]['volume'] = 0.95 - buses[1]['fx_chain'][0]['parameters']['Drive'] = 2.0 - buses[1]['fx_chain'][1]['parameters']['Threshold'] = -16.5 - buses[2]['name'] = 'MUSIC GROOVE' - buses[2]['volume'] = 0.95 - buses[2]['fx_chain'][0]['parameters']['Threshold'] = -20.0 - buses[2]['fx_chain'][2]['parameters']['Stereo Width'] = 1.20 - buses[3]['name'] = 'VOCAL FUNK' - buses[3]['volume'] = 0.95 - buses[3]['fx_chain'][0]['parameters']['Dry/Wet'] = 0.12 - buses[3]['fx_chain'][2]['parameters']['Dry/Wet'] = 0.10 - buses[4]['name'] = 'FX SWING' - buses[4]['volume'] = 0.95 - buses[4]['fx_chain'][1]['parameters']['Dry/Wet'] = 0.16 - - if 'industrial' in style_text: - buses[0]['fx_chain'][1]['parameters']['Drive'] = max( - 3.4, - float(buses[0]['fx_chain'][1]['parameters'].get('Drive', 2.2)), - ) - buses[1]['fx_chain'][0]['parameters']['Drive'] = max( - 4.2, - float(buses[1]['fx_chain'][0]['parameters'].get('Drive', 3.2)), - ) - if 'latin' in style_text or any(term in reference_name for term in ['me gusta', 'química', 'quimica']): - buses[3]['name'] = 'VOCAL LATIN BUS' - buses[3]['fx_chain'][0]['parameters']['Dry/Wet'] = 0.14 - buses[3]['fx_chain'][2]['parameters']['Dry/Wet'] = 0.08 - buses[0]['fx_chain'][0]['parameters']['Threshold'] = -14.0 - - # 
========================================================================= - # Apply STYLE_GAIN_ADJUSTMENTS as multipliers AFTER profile overrides - # ========================================================================= - for style_key, adjustments in STYLE_GAIN_ADJUSTMENTS.items(): - if style_key.lower() in style_text: - self._style_adjustments_applied.append(style_key) - - # Apply bus volume factors - if 'drums_bus_volume_factor' in adjustments: - for bus in buses: - if bus.get('key') == 'drums': - bus['volume'] = bus.get('volume', 0.8) * adjustments['drums_bus_volume_factor'] - - if 'bass_bus_volume_factor' in adjustments: - for bus in buses: - if bus.get('key') == 'bass': - bus['volume'] = bus.get('volume', 0.8) * adjustments['bass_bus_volume_factor'] - - if 'vocal_bus_volume_factor' in adjustments: - for bus in buses: - if bus.get('key') == 'vocal': - bus['volume'] = bus.get('volume', 0.8) * adjustments['vocal_bus_volume_factor'] - - if 'music_bus_volume_factor' in adjustments: - for bus in buses: - if bus.get('key') == 'music': - bus['volume'] = bus.get('volume', 0.8) * adjustments['music_bus_volume_factor'] - - if 'fx_bus_volume_factor' in adjustments: - for bus in buses: - if bus.get('key') == 'fx': - bus['volume'] = bus.get('volume', 0.8) * adjustments['fx_bus_volume_factor'] - - # Apply saturator_drive_factor to all bus saturators - if 'saturator_drive_factor' in adjustments: - for bus in buses: - fx_chain = bus.get('fx_chain', []) - saturator = find_device_in_chain(fx_chain, 'Saturator') - if saturator and 'Drive' in saturator.get('parameters', {}): - saturator['parameters']['Drive'] = ( - saturator['parameters']['Drive'] * adjustments['saturator_drive_factor'] - ) - - # Apply limiter_gain_factor to all bus limiters - if 'limiter_gain_factor' in adjustments: - for bus in buses: - fx_chain = bus.get('fx_chain', []) - limiter = find_device_in_chain(fx_chain, 'Limiter') - if limiter and 'Gain' in limiter.get('parameters', {}): - 
limiter['parameters']['Gain'] = ( - limiter['parameters']['Gain'] * adjustments['limiter_gain_factor'] - ) - - # Store final calibrated bus volumes - for bus in buses: - bus_key = bus.get('key', '') - if bus_key: - self._calibrated_bus_volumes[bus_key] = bus.get('volume', 0.0) - - # RCA Fix: Automatic Makeup and Output gain compensation - for bus in buses: - for device in bus.get('fx_chain', []): - device_type = device.get('device') - params = device.get('parameters', {}) - if device_type == 'Compressor' and 'Threshold' in params: - params['Makeup'] = round(abs(params['Threshold']) * 0.25, 1) - elif device_type == 'Saturator' and 'Drive' in params: - params['Output'] = round(-params['Drive'] * 1.5, 1) - - return buses - - def _build_return_blueprint( - self, - profile: Dict[str, Any], - genre: str, - style: str, - reference_resolution: Optional[Dict[str, Any]] = None, - ) -> List[Dict[str, Any]]: - style_text = f"{genre} {style}".lower() - profile_name = str(profile.get('name', 'default')).lower() - reference_name = str(((reference_resolution or {}).get('reference') or {}).get('name', '')).lower() - returns = [ - { - 'name': 'MCP SPACE', - 'send_key': 'space', - 'color': 56, - 'device_chain': [{'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 1.0}}], - 'volume': 0.76, - }, - { - 'name': 'MCP ECHO', - 'send_key': 'echo', - 'color': 44, - 'device_chain': [{'device': 'Echo', 'parameters': {'Dry/Wet': 1.0}}], - 'volume': 0.72, - }, - { - 'name': 'MCP HEAT', - 'send_key': 'heat', - 'color': 12, - 'device_chain': [ - {'device': 'Saturator', 'parameters': {'Drive': 4.5}}, - {'device': 'Compressor', 'parameters': {'Threshold': -16.0}}, - ], - 'volume': 0.62, - }, - { - 'name': 'MCP GLUE', - 'send_key': 'glue', - 'color': 58, - 'device_chain': [ - {'device': 'Compressor', 'parameters': {'Threshold': -18.0}}, - {'device': 'Limiter', 'parameters': {'Gain': 0.0}}, - ], - 'volume': 0.68, - }, - ] - - if profile_name == 'warehouse': - returns[0]['name'] = 'MCP BUNKER' - 
returns[0]['device_chain'] = [ - {'device': 'Auto Filter', 'parameters': {'Frequency': 7200.0, 'Dry/Wet': 0.22}}, - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 1.0}}, - ] - returns[1]['name'] = 'MCP DUB' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 8200.0, 'Dry/Wet': 0.14}}, - ] - returns[2]['device_chain'][0]['parameters']['Drive'] = 5.5 - returns[2]['volume'] = 0.66 - elif profile_name == 'festival': - returns[0]['name'] = 'MCP WIDE' - returns[0]['device_chain'] = [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 1.0}}, - {'device': 'Utility', 'parameters': {'Stereo Width': 1.14}}, - ] - returns[1]['name'] = 'MCP TAIL' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0}}, - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.18}}, - ] - returns[0]['volume'] = 0.72 - returns[1]['volume'] = 0.68 - elif profile_name == 'swing': - returns[0]['name'] = 'MCP ROOM' - returns[1]['name'] = 'MCP SLAP' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 9800.0, 'Dry/Wet': 0.1}}, - ] - returns[2]['volume'] = 0.58 - elif profile_name == 'jackin': - returns[0]['name'] = 'MCP CLUB' - returns[1]['name'] = 'MCP SWING' - returns[2]['device_chain'][0]['parameters']['Drive'] = 3.8 - returns[3]['volume'] = 0.72 - elif profile_name == 'tech-house-club': - # Short reverb, mono delay, wide FX for club tech-house - returns[0]['name'] = 'REVERB SHORT' - returns[0]['device_chain'] = [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 1.0, 'Decay Time': 0.6}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 8400.0, 'Dry/Wet': 0.08}}, - ] - returns[0]['volume'] = 0.70 - returns[1]['name'] = 'DELAY MONO' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0, 'Ping Pong': 0.0}}, - {'device': 'Utility', 
'parameters': {'Width': 0.0}}, - ] - returns[1]['volume'] = 0.68 - returns[2]['name'] = 'DRIVE HOT' - returns[2]['device_chain'][0]['parameters']['Drive'] = 4.0 - returns[2]['volume'] = 0.64 - returns[3]['name'] = 'GLUE BUS' - returns[3]['device_chain'][0]['parameters']['Threshold'] = -16.5 - returns[3]['volume'] = 0.70 - elif profile_name == 'tech-house-deep': - # Deep minimal returns with subtle processing - returns[0]['name'] = 'REVERB DEEP' - returns[0]['device_chain'] = [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 1.0, 'Decay Time': 1.2}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 6200.0, 'Dry/Wet': 0.12}}, - ] - returns[0]['volume'] = 0.72 - returns[1]['name'] = 'DELAY DEEP' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0, 'Feedback': 0.45}}, - ] - returns[1]['volume'] = 0.64 - returns[2]['name'] = 'SATURATE DEEP' - returns[2]['device_chain'][0]['parameters']['Drive'] = 2.5 - returns[2]['volume'] = 0.56 - returns[3]['name'] = 'GLUE MINIMAL' - returns[3]['device_chain'][0]['parameters']['Threshold'] = -20.0 - returns[3]['volume'] = 0.62 - elif profile_name == 'tech-house-funky': - # Groovy returns with modulation and swing - returns[0]['name'] = 'REVERB GROOVE' - returns[0]['device_chain'] = [ - {'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 1.0, 'Decay Time': 0.8}}, - {'device': 'Chorus-Ensemble', 'parameters': {'Dry/Wet': 0.08}}, - ] - returns[0]['volume'] = 0.74 - returns[1]['name'] = 'DELAY GROOVE' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0, 'Ping Pong': 0.4, 'Feedback': 0.35}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 8000.0, 'Dry/Wet': 0.1}}, - ] - returns[1]['volume'] = 0.70 - returns[2]['name'] = 'DRIVE FUNK' - returns[2]['device_chain'][0]['parameters']['Drive'] = 3.2 - returns[2]['device_chain'].append({'device': 'Chorus-Ensemble', 'parameters': {'Dry/Wet': 0.06}}) - returns[2]['volume'] = 0.60 - returns[3]['name'] = 'GLUE 
SWING' - returns[3]['device_chain'][0]['parameters']['Threshold'] = -15.5 - returns[3]['volume'] = 0.72 - - if 'latin' in style_text or any(term in reference_name for term in ['me gusta', 'química', 'quimica']): - returns[1]['name'] = 'MCP VOX ECHO' - returns[1]['device_chain'] = [ - {'device': 'Echo', 'parameters': {'Dry/Wet': 1.0}}, - {'device': 'Auto Filter', 'parameters': {'Frequency': 10800.0, 'Dry/Wet': 0.12}}, - ] - returns[0]['volume'] = max(0.68, float(returns[0]['volume']) - 0.04) - if 'industrial' in style_text: - returns[2]['name'] = 'MCP DRIVE' - returns[2]['device_chain'][0]['parameters']['Drive'] = max( - 4.8, - float(returns[2]['device_chain'][0]['parameters'].get('Drive', 4.5)) - ) - returns[3]['name'] = 'MCP BUS' - - return returns - - def _build_master_blueprint( - self, - profile: Dict[str, Any], - genre: str, - style: str, - reference_resolution: Optional[Dict[str, Any]] = None, - ) -> Dict[str, Any]: - style_text = f"{genre} {style}".lower() - profile_name = str(profile.get('name', 'default')).lower() - reference_name = str(((reference_resolution or {}).get('reference') or {}).get('name', '')).lower() - - # Start with default calibration values - calibration = dict(MASTER_CALIBRATION.get('default', {})) - - # Find matching profile (case-insensitive, partial match) - matched_profile = 'default' - profile_name_lower = profile_name.lower() - for cal_key in MASTER_CALIBRATION.keys(): - if cal_key.lower() in profile_name_lower or profile_name_lower in cal_key.lower(): - # Merge profile-specific values over defaults - profile_cal = MASTER_CALIBRATION[cal_key] - calibration.update(profile_cal) - matched_profile = cal_key - break - - # Track which profile was used - self._master_profile_used = matched_profile - - # Build master with calibrated values - # Master chain: Utility (gain staging) -> Saturator (color) -> Compressor (glue) -> Limiter (ceiling) - # Target: -1dB peak before limiter, -0.3dBFS ceiling after limiter - master = { - 'volume': 
calibration.get('volume', 0.85), - 'device_chain': [ - { - 'device': 'Utility', - 'parameters': { - 'Gain': calibration.get('utility_gain', -0.5), - 'Stereo Width': calibration.get('stereo_width', 1.04), - } - }, - { - 'device': 'Saturator', - 'parameters': {'Drive': calibration.get('saturator_drive', 0.12)} - }, - { - 'device': 'Compressor', - 'parameters': { - 'Ratio': calibration.get('compressor_ratio', 0.50), - 'Attack': calibration.get('compressor_attack', 0.30), - 'Release': calibration.get('compressor_release', 0.20), - } - }, - { - 'device': 'Limiter', - 'parameters': { - 'Gain': calibration.get('limiter_gain', 0.8), - 'Ceiling': calibration.get('limiter_ceiling', -0.3), - } - }, - ], - } - - # Apply style-based limiter_gain_factor from STYLE_GAIN_ADJUSTMENTS - for style_key, style_adj in STYLE_GAIN_ADJUSTMENTS.items(): - if style_key.lower() in style_text: - limiter_factor = style_adj.get('limiter_gain_factor') - if limiter_factor is not None: - master['device_chain'][3]['parameters']['Gain'] *= limiter_factor - break - - if 'industrial' in style_text: - master['device_chain'][1]['parameters']['Drive'] = max( - 0.8, - float(master['device_chain'][1]['parameters'].get('Drive', 0.3)) - ) - master['device_chain'][2]['parameters']['Ratio'] = max( - 0.7, - float(master['device_chain'][2]['parameters'].get('Ratio', 0.62)) - ) - - if 'latin' in style_text or any(term in reference_name for term in ['me gusta', 'química', 'quimica']): - master['device_chain'][0]['parameters']['Stereo Width'] = max( - 1.14, - float(master['device_chain'][0]['parameters'].get('Stereo Width', 1.1)) - ) - master['device_chain'][3]['parameters']['Gain'] = max( - 0.1, - float(master['device_chain'][3]['parameters'].get('Gain', 0.0)) - ) - - return master - - def _apply_role_gain_calibration(self, role: str, base_volume: float) -> Dict[str, float]: - """ - Apply ROLE_GAIN_CALIBRATION to a role's volume. 
- - Args: - role: The role name (e.g., 'kick', 'bass', 'clap') - base_volume: The base volume from ROLE_MIX - - Returns: - Dict with 'volume' and optionally 'saturator_drive' if calibrated - """ - if role not in ROLE_GAIN_CALIBRATION: - return {'volume': base_volume} - - calibration = ROLE_GAIN_CALIBRATION[role] - calibrated_volume = float(calibration.get('volume', base_volume)) - - # Apply peak_reduction if present - peak_reduction = calibration.get('peak_reduction', 0.0) - if peak_reduction > 0: - calibrated_volume *= (1.0 - float(peak_reduction)) - self._peak_reductions_count += 1 - - result = {'volume': round(max(0.0, min(1.0, calibrated_volume)), 3)} - - # Include saturator_drive if present in calibration - if 'saturator_drive' in calibration: - result['saturator_drive'] = float(calibration['saturator_drive']) - - self._gain_calibration_overrides_count += 1 - - return result - - def _shape_mix_profile(self, role: str, mix_profile: Dict[str, Any], profile: Dict[str, Any], style: str) -> Dict[str, Any]: - shaped = { - 'volume': float(mix_profile.get('volume', 0.72)), - 'pan': float(mix_profile.get('pan', 0.0)), - 'sends': dict(mix_profile.get('sends', {})), - } - - # Apply ROLE_GAIN_CALIBRATION if available - overrides base volume - calibration = self._apply_role_gain_calibration(role, shaped['volume']) - if calibration.get('volume') is not None: - shaped['volume'] = calibration['volume'] - if calibration.get('saturator_drive') is not None: - shaped['saturator_drive'] = calibration['saturator_drive'] - - profile_name = str(profile.get('name', 'default')).lower() - pan_width = float(profile.get('pan_width', 0.16) or 0.16) - style_text = str(style or '').lower() - - if role in ['hat_closed', 'hat_open', 'top_loop', 'perc', 'ride', 'pluck', 'arp', 'counter', 'vocal']: - shaped['pan'] = max(-1.0, min(1.0, shaped['pan'] * (1.0 + pan_width))) - - if profile_name == 'warehouse': - if role in ['kick', 'bass', 'sub_bass']: - shaped['volume'] *= 1.03 - if role in ['pad', 
'drone', 'atmos']: - shaped['sends']['space'] = shaped['sends'].get('space', 0.0) * 0.88 - if role in ['reverse_fx', 'riser', 'impact']: - shaped['sends']['heat'] = max(shaped['sends'].get('heat', 0.0), 0.08) - elif profile_name == 'festival': - if role in ['lead', 'chords', 'pad', 'arp', 'vocal']: - shaped['volume'] *= 1.04 - shaped['sends']['space'] = shaped['sends'].get('space', 0.0) * 1.15 - if role in ['kick', 'clap']: - shaped['sends']['glue'] = max(shaped['sends'].get('glue', 0.0), 0.12) - elif profile_name == 'swing': - if role in ['perc', 'top_loop', 'ride', 'vocal', 'pluck']: - shaped['sends']['echo'] = shaped['sends'].get('echo', 0.0) * 1.14 - if role in ['kick', 'sub_bass']: - shaped['volume'] *= 0.98 - elif profile_name == 'jackin': - if role in ['clap', 'perc', 'vocal', 'counter']: - shaped['sends']['echo'] = shaped['sends'].get('echo', 0.0) * 1.08 - if role in ['top_loop', 'ride']: - shaped['volume'] *= 1.03 - elif profile_name == 'tech-house-club': - # Club-oriented: punchy drums, present vocals, tight bass - if role in ['kick', 'clap']: - shaped['volume'] *= 1.02 - shaped['sends']['glue'] = max(shaped['sends'].get('glue', 0.0), 0.10) - if role in ['bass', 'sub_bass']: - shaped['sends']['heat'] = max(shaped['sends'].get('heat', 0.0), 0.06) - if role in ['vocal', 'counter']: - shaped['sends']['echo'] = shaped['sends'].get('echo', 0.0) * 1.10 - if role in ['hat_open', 'top_loop', 'ride']: - shaped['sends']['space'] = shaped['sends'].get('space', 0.0) * 0.92 - elif profile_name == 'tech-house-deep': - # Deep minimal: subtle processing, wide stereo - if role in ['kick', 'sub_bass']: - shaped['volume'] *= 0.98 - if role in ['pad', 'drone', 'atmos', 'chords']: - shaped['sends']['space'] = shaped['sends'].get('space', 0.0) * 1.12 - if role in ['perc', 'top_loop']: - shaped['volume'] *= 0.95 - shaped['sends']['echo'] = shaped['sends'].get('echo', 0.0) * 0.88 - elif profile_name == 'tech-house-funky': - # Funky groove: wider pan, more echo, bouncy feel - if 
role in ['perc', 'top_loop', 'ride']: - shaped['sends']['echo'] = shaped['sends'].get('echo', 0.0) * 1.18 - if role in ['bass', 'sub_bass']: - shaped['sends']['heat'] = max(shaped['sends'].get('heat', 0.0), 0.05) - if role in ['vocal', 'pluck', 'arp']: - shaped['sends']['space'] = shaped['sends'].get('space', 0.0) * 1.08 - if role in ['clap', 'hat_closed']: - shaped['volume'] *= 1.02 - - if 'latin' in style_text and role in ['perc', 'top_loop', 'ride', 'vocal']: - shaped['sends']['echo'] = shaped['sends'].get('echo', 0.0) * 1.12 - shaped['pan'] = max(-1.0, min(1.0, shaped['pan'] * 1.08)) - if 'industrial' in style_text and role in ['kick', 'bass', 'stab', 'impact', 'riser']: - shaped['sends']['heat'] = max(shaped['sends'].get('heat', 0.0), 0.09) - - shaped['volume'] = round(max(0.0, min(1.0, shaped['volume'])), 3) - shaped['pan'] = round(max(-1.0, min(1.0, shaped['pan'])), 3) - shaped['sends'] = { - send_key: round(max(0.0, min(1.0, float(send_value))), 3) - for send_key, send_value in shaped['sends'].items() - } - return shaped - - def _shape_role_fx_chain(self, role: str, profile: Dict[str, Any], style: str) -> List[Dict[str, Any]]: - chain = [dict(item) for item in ROLE_FX_CHAINS.get(role, [])] - profile_name = str(profile.get('name', 'default')).lower() - style_text = str(style or '').lower() - - if profile_name == 'warehouse': - if role in ['kick', 'bass', 'stab']: - chain.append({'device': 'Compressor', 'parameters': {'Threshold': -18.0}}) - if role in ['atmos', 'drone', 'pad']: - chain.append({'device': 'Auto Filter', 'parameters': {'Frequency': 7600.0, 'Dry/Wet': 0.14}}) - elif profile_name == 'festival': - if role in ['lead', 'arp', 'vocal']: - chain.append({'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.1}}) - if role in ['chords', 'pad']: - chain.append({'device': 'Utility', 'parameters': {'Width': 140.0}}) - elif profile_name == 'swing': - if role in ['perc', 'top_loop', 'ride', 'vocal']: - chain.append({'device': 'Echo', 'parameters': 
{'Dry/Wet': 0.08}}) - elif profile_name == 'jackin': - if role in ['clap', 'perc', 'vocal', 'counter']: - chain.append({'device': 'Saturator', 'parameters': {'Drive': 1.5}}) - elif profile_name == 'tech-house-club': - # Club: punchy drums, saturated bass, crisp tops - if role in ['kick', 'clap']: - chain.append({'device': 'Compressor', 'parameters': {'Threshold': -16.0, 'Attack': 0.02}}) - if role in ['bass', 'sub_bass']: - chain.append({'device': 'Saturator', 'parameters': {'Drive': 2.0}}) - if role in ['hat_closed', 'hat_open', 'top_loop']: - chain.append({'device': 'Auto Filter', 'parameters': {'Frequency': 12000.0, 'Dry/Wet': 0.12}}) - if role in ['vocal', 'counter']: - chain.append({'device': 'Echo', 'parameters': {'Dry/Wet': 0.08}}) - elif profile_name == 'tech-house-deep': - # Deep: subtle saturation, atmospheric processing - if role in ['kick', 'bass']: - chain.append({'device': 'Compressor', 'parameters': {'Threshold': -20.0}}) - if role in ['pad', 'drone', 'atmos']: - chain.append({'device': 'Hybrid Reverb', 'parameters': {'Dry/Wet': 0.12}}) - if role in ['chords', 'pluck']: - chain.append({'device': 'Auto Filter', 'parameters': {'Frequency': 9200.0, 'Dry/Wet': 0.08}}) - elif profile_name == 'tech-house-funky': - # Funky: groove-enhancing FX, modulation - if role in ['perc', 'top_loop', 'ride']: - chain.append({'device': 'Echo', 'parameters': {'Dry/Wet': 0.10, 'Ping Pong': 0.3}}) - if role in ['bass', 'sub_bass']: - chain.append({'device': 'Saturator', 'parameters': {'Drive': 1.8}}) - if role in ['vocal', 'pluck', 'arp']: - chain.append({'device': 'Chorus-Ensemble', 'parameters': {'Dry/Wet': 0.06}}) - if role in ['clap', 'hat_closed']: - chain.append({'device': 'Saturator', 'parameters': {'Drive': 1.2}}) - - if 'industrial' in style_text and role in ['kick', 'bass', 'impact', 'riser']: - chain.append({'device': 'Saturator', 'parameters': {'Drive': 1.8}}) - if 'latin' in style_text and role in ['perc', 'top_loop', 'ride', 'vocal']: - 
chain.append({'device': 'Auto Filter', 'parameters': {'Frequency': 11200.0, 'Dry/Wet': 0.1}}) - - return chain - - def _get_section_drum_variant(self, role: str, section: Dict[str, Any]) -> str: - """Get appropriate drum variant for section and role with cross-generation diversity.""" - kind = str(section.get('kind', 'drop')).lower() - role_lower = role.lower() - - if role_lower not in DRUM_SECTION_VARIANTS.get(kind, {}): - return 'straight' - - variants = list(DRUM_SECTION_VARIANTS[kind][role_lower]) - valid_variants = [v for v in variants if v in DRUM_PATTERN_BANKS.get(role_lower, {})] - if not valid_variants and role_lower in DRUM_PATTERN_BANKS: - valid_variants = list(DRUM_PATTERN_BANKS[role_lower].keys()) - - if not valid_variants: - return 'straight' - - rng = self._section_rng(section, role, salt=1) - - if len(valid_variants) > 1: - scored_variants = [] - for v in valid_variants: - penalty = _get_pattern_variant_penalty('drum', f'{role_lower}_{v}') - score = rng.random() - penalty - scored_variants.append((score, v)) - scored_variants.sort(reverse=True) - chosen = scored_variants[0][1] - else: - chosen = valid_variants[0] - - _record_pattern_variant_usage('drum', f'{role_lower}_{chosen}') - return chosen - - def _generate_drum_pattern_from_bank(self, role: str, variant: str, - section_length: float, - velocity_base: int = 100) -> List[Dict[str, Any]]: - """Generate drum pattern from pattern bank.""" - role_lower = role.lower() - - if role_lower not in DRUM_PATTERN_BANKS: - return [] - - bank = DRUM_PATTERN_BANKS[role_lower] - if variant not in bank: - variant = list(bank.keys())[0] # Fallback to first - - positions = bank[variant] - notes = [] - - # Determine pitch based on role - pitch_map = { - 'kick': 36, 'clap': 39, 'hat_closed': 42, - 'hat_open': 46, 'perc': 50, 'ride': 51 - } - pitch = pitch_map.get(role_lower, 36) - - for pos in positions: - # Repeat pattern for each bar - for bar in range(int(section_length // 4)): - start = pos + (bar * 4.0) - if 
start < section_length: - # Add slight velocity variation - velocity = max(60, min(127, velocity_base + random.randint(-10, 10))) - duration = 0.1 if role_lower in ['hat_closed', 'hat_open', 'ride'] else 0.15 - notes.append(self._make_note(pitch, start, duration, velocity)) - - logger.debug(f"Generated drum pattern from bank: role={role}, variant={variant}, notes={len(notes)}") - return notes - - def _get_section_bass_variant(self, section: Dict[str, Any]) -> str: - """Get appropriate bass variant for section with cross-generation diversity.""" - kind = str(section.get('kind', 'drop')).lower() - - if kind not in BASS_SECTION_VARIANTS: - return 'anchor' - - variants = list(BASS_SECTION_VARIANTS[kind]) - valid_variants = [v for v in variants if v in BASS_PATTERN_BANKS] - if not valid_variants: - valid_variants = list(BASS_PATTERN_BANKS.keys()) - - rng = self._section_rng(section, 'bass', salt=2) - - if len(valid_variants) > 1: - scored_variants = [] - for v in valid_variants: - penalty = _get_pattern_variant_penalty('bass', v) - score = rng.random() - penalty - scored_variants.append((score, v)) - scored_variants.sort(reverse=True) - chosen = scored_variants[0][1] - else: - chosen = valid_variants[0] if valid_variants else 'anchor' - - _record_pattern_variant_usage('bass', chosen) - return chosen - - def _compute_section_signature(self, section: Dict[str, Any]) -> str: - """Compute a signature for section to detect repetition.""" - section = self._ensure_section_pattern_variants(section) - signature_parts = [] - drum_role_variants = dict(section.get('drum_role_variants') or {}) - - signature_parts.append(f"kick:{drum_role_variants.get('kick', section.get('drum_variant', 'default'))}") - signature_parts.append(f"clap:{drum_role_variants.get('clap', section.get('drum_variant', 'default'))}") - signature_parts.append(f"hat:{drum_role_variants.get('hat_closed', section.get('drum_variant', 'default'))}") - signature_parts.append(f"bass:{section.get('bass_bank_variant', 
section.get('bass_variant', 'default'))}") - signature_parts.append(f"lead:{section.get('melodic_bank_variant', section.get('melodic_variant', 'default'))}") - signature_parts.append(f"fill:{section.get('transition_fill', 'none')}") - - # Add density and swing - density = section.get('density', 1.0) - swing = section.get('swing', 0.0) - signature_parts.append(f"d:{density:.1f}") - signature_parts.append(f"s:{swing:.2f}") - - return "|".join(signature_parts) - - def _check_section_repetition(self, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Check and warn about excessive section repetition.""" - signatures = [] - consecutive_same = 0 - max_consecutive = 2 - - for i, section in enumerate(sections): - self._ensure_section_pattern_variants(section) - sig = self._compute_section_signature(section) - - if signatures and signatures[-1] == sig: - consecutive_same += 1 - if consecutive_same >= max_consecutive: - logger.warning("REPETITION: %d consecutive sections with same signature: %s", - consecutive_same + 1, sig) - self._force_section_pattern_variation(section) - sig = self._compute_section_signature(section) - else: - consecutive_same = 0 - - signatures.append(sig) - - return sections - - def _record_section_variant(self, section: Dict[str, Any], role: str, variant: str): - """Record variant used for a role in a section.""" - key = f'{role}_variant' - section[key] = variant - - def _choose_alternate_variant(self, options: List[str], current: Optional[str], rng: random.Random) -> Optional[str]: - ordered: List[str] = [] - for option in options: - if option not in ordered: - ordered.append(option) - if not ordered: - return current - alternatives = [option for option in ordered if option != current] - if not alternatives: - return current or ordered[0] - return rng.choice(alternatives) - - def _ensure_section_pattern_variants(self, section: Dict[str, Any]) -> Dict[str, Any]: - _kind = str(section.get('kind', 'drop')).lower() # noqa: F841 - used by 
helper methods via section dict - drum_role_variants = dict(section.get('drum_role_variants') or {}) - for role in ['kick', 'clap', 'hat_closed', 'hat_open', 'perc', 'ride']: - if role in drum_role_variants: - continue - variant = self._get_section_drum_variant(role, section) - if variant in DRUM_PATTERN_BANKS.get(role, {}): - drum_role_variants[role] = variant - self._record_section_variant(section, role, variant) - section['drum_role_variants'] = drum_role_variants - - bass_bank_variant = str(section.get('bass_bank_variant', '') or '') - if bass_bank_variant not in BASS_PATTERN_BANKS: - bass_bank_variant = self._get_section_bass_variant(section) - section['bass_bank_variant'] = bass_bank_variant - self._record_section_variant(section, 'bass_bank', str(section.get('bass_bank_variant', 'anchor'))) - - melodic_bank_variant = str(section.get('melodic_bank_variant', '') or '') - if melodic_bank_variant not in MELODIC_PATTERN_BANKS: - melodic_bank_variant = self._get_section_melodic_variant(section) - section['melodic_bank_variant'] = melodic_bank_variant - self._record_section_variant(section, 'melodic_bank', str(section.get('melodic_bank_variant', 'motif'))) - section.setdefault('pattern_variant_ready', True) - return section - - def _force_section_pattern_variation(self, section: Dict[str, Any]) -> Dict[str, Any]: - kind = str(section.get('kind', 'drop')).lower() - self._ensure_section_pattern_variants(section) - drum_role_variants = dict(section.get('drum_role_variants') or {}) - - for role in ['kick', 'clap', 'hat_closed']: - options = DRUM_SECTION_VARIANTS.get(kind, {}).get(role, []) - current = drum_role_variants.get(role) - next_variant = self._choose_alternate_variant(options, current, self._section_rng(section, role, salt=101)) - if next_variant: - drum_role_variants[role] = next_variant - self._record_section_variant(section, role, next_variant) - section['drum_role_variants'] = drum_role_variants - - bass_options = BASS_SECTION_VARIANTS.get(kind, []) - 
bass_variant = self._choose_alternate_variant( - bass_options, - str(section.get('bass_bank_variant', '') or ''), - self._section_rng(section, 'bass', salt=102), - ) - if bass_variant: - section['bass_bank_variant'] = bass_variant - self._record_section_variant(section, 'bass_bank', bass_variant) - - melodic_options = MELODIC_SECTION_VARIANTS.get(kind, []) - melodic_variant = self._choose_alternate_variant( - melodic_options, - str(section.get('melodic_bank_variant', '') or ''), - self._section_rng(section, 'melodic', salt=103), - ) - if melodic_variant: - section['melodic_bank_variant'] = melodic_variant - self._record_section_variant(section, 'melodic_bank', melodic_variant) - - return section - - def _generate_bass_pattern_from_bank(self, variant: str, key: str, - section_length: float, - velocity_base: int = 95) -> List[Dict[str, Any]]: - """Generate bass pattern from pattern bank.""" - if variant not in BASS_PATTERN_BANKS: - variant = 'anchor' - - bank = BASS_PATTERN_BANKS[variant] - positions = bank['positions'] - durations = bank['durations'] - style = bank.get('style', 'root') - - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, 2) - - notes = [] - for bar in range(int(section_length // 4)): - for i, pos in enumerate(positions): - start = pos + (bar * 4.0) - if start < section_length: - duration = durations[i] if i < len(durations) else 0.4 - velocity = max(70, min(120, velocity_base + random.randint(-8, 8))) - - # Adjust pitch based on style - pitch = root_midi - if style == 'ascending' and bar > 0: - pitch += min(bar, 5) # Rise over bars - elif style == 'syncopated' and i % 2 == 1: - pitch += 5 # Fifth on offbeats - - notes.append(self._make_note(pitch, start, duration, velocity)) - - logger.debug(f"Generated bass pattern from bank: variant={variant}, notes={len(notes)}") - return notes - - def _vary_drum_notes(self, notes: List[Dict[str, Any]], role: str, section: Dict[str, Any], - section_length: float) -> 
List[Dict[str, Any]]: - section = self._ensure_section_pattern_variants(section) - role_variant = str((section.get('drum_role_variants') or {}).get(role, '') or '').lower() - kind = str(section.get('kind', 'drop')).lower() - density = float(section.get('density', 1.0)) - _ = int(section.get('energy', 1)) - variant = str(section.get('drum_variant', 'straight')).lower() - swing = float(section.get('swing', 0.0)) - tightness = float(self._current_generation_profile.get('drum_tightness', 1.0)) - rng = self._section_rng(section, role, salt=5) - - if role_variant in DRUM_PATTERN_BANKS.get(role, {}): - logger.debug(f"Using section pattern bank for {role} with variant {role_variant} in section {kind}") - bank_notes = self._generate_drum_pattern_from_bank(role, role_variant, section_length) - if bank_notes: - use_bank_prob = 0.85 if kind in ['intro', 'break', 'outro'] else 0.95 - if rng.random() < use_bank_prob or not notes: - return bank_notes - - if not notes: - if role in DRUM_PATTERN_BANKS: - all_variants = list(DRUM_PATTERN_BANKS[role].keys()) - if all_variants: - fallback_variant = rng.choice(all_variants) - return self._generate_drum_pattern_from_bank(role, fallback_variant, section_length) - return [] - - varied = list(notes) - - if variant == 'skip' and role in ['hat_closed', 'hat_open', 'top_loop', 'perc', 'ride']: - varied = self._apply_density_mask(varied, section, role, keep_probability=min(0.94, max(0.54, density - 0.08))) - elif variant == 'pressure' and role in ['kick', 'hat_closed', 'perc']: - pressure_notes = [] - for bar_start in range(0, int(section_length), 4): - if role == 'kick' and rng.random() > 0.35: - pressure_notes.append(self._make_note(36, min(section_length - 0.05, bar_start + 3.5), 0.12, 92)) - elif role == 'hat_closed' and rng.random() > 0.45: - pressure_notes.append(self._make_note(42, min(section_length - 0.05, bar_start + 3.75), 0.06, 58)) - elif role == 'perc' and rng.random() > 0.5: - pressure_notes.append(self._make_note(50, 
min(section_length - 0.05, bar_start + 3.25), 0.12, 74)) - varied = self._merge_section_notes(varied, pressure_notes, section_length) - elif variant == 'shuffle' and role not in ['kick', 'clap', 'sc_trigger', 'crash']: - varied = self._apply_swing(varied, swing or (0.035 / max(0.8, tightness)), section_length) - - if swing > 0.0 and role in ['top_loop', 'perc', 'ride']: - varied = self._apply_swing(varied, swing * 0.55, section_length) - - return varied - - def _vary_bass_notes(self, notes: List[Dict[str, Any]], role: str, key: str, - section: Dict[str, Any], section_length: float) -> List[Dict[str, Any]]: - section = self._ensure_section_pattern_variants(section) - bank_variant = str(section.get('bass_bank_variant', '') or '').lower() - kind = str(section.get('kind', 'drop')).lower() - variant = str(section.get('bass_variant', 'anchor')).lower() - - if bank_variant in BASS_PATTERN_BANKS: - logger.debug(f"Using section bass pattern bank for variant {bank_variant} in section {kind}") - return self._generate_bass_pattern_from_bank(bank_variant, key, section_length) - - if not notes: - if bank_variant in BASS_PATTERN_BANKS: - return self._generate_bass_pattern_from_bank(bank_variant, key, section_length) - all_variants = list(BASS_PATTERN_BANKS.keys()) - if all_variants: - rng = self._section_rng(section, role, salt=7) - fallback = rng.choice(all_variants) - return self._generate_bass_pattern_from_bank(fallback, key, section_length) - return [] - - profile_motion = str(self._current_generation_profile.get('bass_motion', 'locked')).lower() - rng = self._section_rng(section, role, salt=7) - root_note = key[:-1] if len(key) > 1 else key - scale_name = 'minor' if 'm' in key.lower() else 'major' - root_midi = self.note_name_to_midi(root_note, 2) - scale_notes = self.get_scale_notes(root_midi, scale_name) - - varied = [] - for index, note in enumerate(notes): - pitch = int(note['pitch']) - start = float(note['start']) - duration = float(note['duration']) - velocity = 
int(note['velocity']) - - if variant == 'anchor' and (start % 4.0) < 0.001: - pitch = root_midi - duration = max(duration, 0.5) - elif variant == 'bounce' and (start % 1.0) >= 0.5: - velocity = min(124, velocity + 8) - duration = max(0.18, duration * 0.82) - elif variant == 'syncopated' and (start % 1.0) < 0.001 and rng.random() > 0.4: - start = min(section_length - 0.05, start + 0.25) - duration = max(0.16, duration * 0.68) - elif variant == 'pedal' and index % 3 == 0: - pitch = root_midi - - if profile_motion == 'lifted' and index % 8 == 6: - pitch += 12 - elif profile_motion == 'syncopated' and rng.random() > 0.72: - pitch = scale_notes[(index + 4) % len(scale_notes)] - elif profile_motion == 'bouncy' and (start % 4.0) >= 2.0: - velocity = min(124, velocity + 5) - - varied.append(self._make_note(pitch, start, duration, velocity)) - - return self._shape_notes_for_section(varied, kind, role, section_length) - - def _get_section_melodic_variant(self, section: Dict[str, Any]) -> str: - """Get appropriate melodic variant for section with cross-generation diversity.""" - kind = str(section.get('kind', 'drop')).lower() - - if kind not in MELODIC_SECTION_VARIANTS: - return 'motif' - - variants = list(MELODIC_SECTION_VARIANTS[kind]) - valid_variants = [v for v in variants if v in MELODIC_PATTERN_BANKS] - if not valid_variants: - valid_variants = list(MELODIC_PATTERN_BANKS.keys()) - - rng = self._section_rng(section, 'melodic', salt=3) - - if len(valid_variants) > 1: - scored_variants = [] - for v in valid_variants: - penalty = _get_pattern_variant_penalty('melodic', v) - score = rng.random() - penalty - scored_variants.append((score, v)) - scored_variants.sort(reverse=True) - chosen = scored_variants[0][1] - else: - chosen = valid_variants[0] if valid_variants else 'motif' - - _record_pattern_variant_usage('melodic', chosen) - return chosen - - def _generate_melodic_pattern_from_bank(self, variant: str, key: str, - scale_name: str, - section_length: float, - 
velocity_base: int = 90) -> List[Dict[str, Any]]: - """Generate melodic pattern from pattern bank.""" - if variant not in MELODIC_PATTERN_BANKS: - variant = 'motif' - - bank = MELODIC_PATTERN_BANKS[variant] - intervals = bank['intervals'] - rhythm = bank['rhythm'] - durations = bank['durations'] - - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, 5) - scale_notes = self.get_scale_notes(root_midi, scale_name) - - notes = [] - for bar in range(int(section_length // 4)): - for i, pos in enumerate(rhythm): - start = pos + (bar * 4.0) - if start < section_length: - interval = intervals[i] if i < len(intervals) else intervals[-1] - pitch = scale_notes[interval % len(scale_notes)] - duration = durations[i] if i < len(durations) else 0.3 - velocity = max(60, min(110, velocity_base + random.randint(-10, 10))) - - notes.append(self._make_note(pitch, start, duration, velocity)) - - logger.debug(f"Generated melodic pattern from bank: variant={variant}, notes={len(notes)}") - return notes - - def _vary_melodic_notes(self, notes: List[Dict[str, Any]], role: str, key: str, scale_name: str, - section: Dict[str, Any], section_length: float) -> List[Dict[str, Any]]: - section = self._ensure_section_pattern_variants(section) - bank_variant = str(section.get('melodic_bank_variant', '') or '').lower() - kind = str(section.get('kind', 'drop')).lower() - - if bank_variant in MELODIC_PATTERN_BANKS: - logger.debug(f"Using section melodic pattern bank for variant {bank_variant} in section {kind}") - return self._generate_melodic_pattern_from_bank(bank_variant, key, scale_name, section_length) - - if not notes: - if bank_variant in MELODIC_PATTERN_BANKS: - return self._generate_melodic_pattern_from_bank(bank_variant, key, scale_name, section_length) - all_variants = list(MELODIC_PATTERN_BANKS.keys()) - if all_variants: - rng = self._section_rng(section, role, salt=11) - fallback = rng.choice(all_variants) - return 
self._generate_melodic_pattern_from_bank(fallback, key, scale_name, section_length) - return [] - - variant = str(section.get('melodic_variant', 'motif')).lower() - profile_motion = str(self._current_generation_profile.get('melodic_motion', 'restrained')).lower() - rng = self._section_rng(section, role, salt=11) - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, 5) - scale_notes = self.get_scale_notes(root_midi, scale_name) - - transformed = [] - for index, note in enumerate(notes): - start = float(note['start']) - pitch = int(note['pitch']) - duration = float(note['duration']) - velocity = int(note['velocity']) - keep = True - - if variant == 'response' and int(start / 2.0) % 2 == 0 and role in ['lead', 'pluck', 'counter']: - keep = False - elif variant == 'lift' and index % 4 == 3: - pitch += 12 - velocity = min(124, velocity + 10) - elif variant == 'descend' and index % 5 == 4: - pitch -= 12 - duration = max(0.16, duration * 0.9) - elif variant == 'drone': - keep = (start % 4.0) < 0.001 or duration >= 0.5 - if keep: - pitch = scale_notes[index % min(3, len(scale_notes))] - duration = max(duration, 1.2) - - if keep and profile_motion in ['anthemic', 'hooky'] and role in ['lead', 'arp', 'pluck']: - if rng.random() > 0.78: - pitch += 12 - elif profile_motion == 'hooky' and rng.random() > 0.84: - start = min(section_length - 0.05, start + 0.25) - - if keep and profile_motion == 'call_response' and role in ['counter', 'pluck'] and (start % 4.0) < 2.0: - velocity = max(52, velocity - 8) - - if keep: - transformed.append(self._make_note(pitch, start, duration, velocity)) - - if role in ['arp', 'pluck'] and float(section.get('swing', 0.0)) > 0.0: - transformed = self._apply_swing(transformed, float(section.get('swing', 0.0)) * 0.45, section_length) - - return self._shape_notes_for_section(transformed, kind, role, section_length) - - def _transpose_notes(self, notes: List[Dict[str, Any]], semitones: int) -> List[Dict[str, 
Any]]: - return [ - self._make_note(note['pitch'] + semitones, note['start'], note['duration'], note['velocity']) - for note in notes - ] - - def _scale_note_lengths(self, notes: List[Dict[str, Any]], factor: float, minimum: float = 0.1) -> List[Dict[str, Any]]: - scaled = [] - for note in notes: - scaled.append( - self._make_note( - note['pitch'], - note['start'], - max(minimum, float(note['duration']) * factor), - note['velocity'], - ) - ) - return scaled - - def _shape_notes_for_section(self, notes: List[Dict[str, Any]], section_kind: str, role: str, - section_length: float) -> List[Dict[str, Any]]: - if not notes: - return [] - - shaped = [] - for note in notes: - start = float(note['start']) - keep = True - - if section_kind in ['intro', 'outro'] and role in ['bass', 'sub_bass', 'lead', 'pluck', 'arp', 'counter']: - keep = int(start * 2) % 4 == 0 - elif section_kind == 'break' and role in ['bass', 'sub_bass', 'lead', 'pluck', 'arp', 'counter', 'clap', 'hat_open', 'ride']: - keep = int(start) % 4 == 0 - - if keep and start < section_length: - duration = min(float(note['duration']), section_length - start) - shaped.append(self._make_note(note['pitch'], start, duration, note['velocity'])) - return shaped - - def _merge_section_notes(self, base_notes: List[Dict[str, Any]], extra_notes: List[Dict[str, Any]], - section_length: float) -> List[Dict[str, Any]]: - merged = [] - for note in list(base_notes) + list(extra_notes): - start = float(note['start']) - if start >= section_length: - continue - duration = min(float(note['duration']), max(0.05, section_length - start)) - merged.append(self._make_note(note['pitch'], start, duration, note['velocity'])) - merged.sort(key=lambda item: (item['start'], item['pitch'])) - return merged - - def _build_drum_fill(self, role: str, section_length: float, intensity: int) -> List[Dict[str, Any]]: - fill_start = max(0.0, section_length - 1.0) - if role == 'kick' and intensity >= 3: - return [self._make_note(36, fill_start + step, 
0.14, 112 + (idx % 2) * 8) for idx, step in enumerate([0.0, 0.25, 0.5, 0.75])] - if role == 'clap' and intensity >= 3: - return [self._make_note(39, fill_start + step, 0.18, 92 + idx * 6) for idx, step in enumerate([0.25, 0.5, 0.75])] - if role == 'hat_closed': - return [self._make_note(42, fill_start + (idx * 0.125), 0.06, 64 + (idx % 4) * 6) for idx in range(8)] - if role == 'perc' and intensity >= 2: - return [ - self._make_note(37, fill_start + 0.125, 0.08, 72), - self._make_note(47, fill_start + 0.375, 0.08, 76), - self._make_note(50, fill_start + 0.625, 0.1, 82), - ] - return [] - - def _build_turnaround_notes(self, key: str, scale_name: str, section_length: float, - octave: int, velocity: int = 92) -> List[Dict[str, Any]]: - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, octave) - scale_notes = self.get_scale_notes(root_midi, scale_name) - fill_start = max(0.0, section_length - 2.0) - degrees = [0, 2, 4, 6] - notes = [] - for index, degree in enumerate(degrees): - pitch = scale_notes[degree % len(scale_notes)] - notes.append(self._make_note(pitch, fill_start + (index * 0.5), 0.38, velocity + index * 4)) - return notes - - def _generate_fill_pattern(self, fill_name: str, start_offset: float) -> Tuple[List[Dict[str, Any]], List[str]]: - """ - Generate fill pattern at specified offset. 
- - Returns: - (notes, roles) - tuple of note list and list of roles used - """ - if fill_name not in FILL_PATTERNS: - return [], [] - - fill = FILL_PATTERNS[fill_name] - notes = [] - roles_used = [] - - pitch_map = { - 'kick': 36, 'snare': 38, 'hat': 42, 'hat_open': 46, - 'crash': 49, 'ride': 51, 'perc': 50 - } - - for role, positions in fill['pattern'].items(): - roles_used.append(role) - pitch = pitch_map.get(role, 50) - velocity = fill['velocities'].get(role, 90) - - for pos in positions: - start = start_offset + pos - duration = 0.1 if role in ['hat', 'hat_open', 'ride'] else 0.15 - notes.append(self._make_note(pitch, start, duration, velocity)) - - # Track materialization for debugging/logging - if not hasattr(self, '_transition_materialization_log'): - self._transition_materialization_log = [] - self._transition_materialization_log.append({ - 'fill': fill_name, - 'start': start_offset, - 'notes_count': len(notes), - 'roles': roles_used - }) - - return notes, roles_used - - def _generate_transition_events(self, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """Generate fill and transition events between sections.""" - transition_events = [] - - # Calculate start positions for each section - arrangement_time = 0.0 - for section in sections: - section['start'] = arrangement_time - arrangement_time += float(section.get('beats', 0.0) or 0.0) - - for i, section in enumerate(sections): - kind = str(section.get('kind', '')).lower() - start = float(section.get('start', 0.0)) - length = float(section.get('beats', 8.0)) - end = start + length - - # Check for transition to next section - if i < len(sections) - 1: - next_kind = str(sections[i + 1].get('kind', '')).lower() - transition_key = (kind, next_kind) - - if transition_key in TRANSITION_EVENTS: - fills = TRANSITION_EVENTS[transition_key] - rng = self._section_rng(section, 'transition', salt=20) - fill_name = rng.choice(fills) - - # Get notes and roles from fill pattern - fill_notes, fill_roles = 
self._generate_fill_pattern(fill_name, end - 2.0) - - transition_events.append({ - 'fill': fill_name, - 'start': end - 2.0, - 'section_kind': kind, - 'next_section_kind': next_kind, - 'roles': fill_roles, - 'notes': fill_notes, # Include actual notes for materialization - 'notes_count': len(fill_notes) - }) - logger.debug("TRANSITION: Added '%s' at %.1f for %s->%s", - fill_name, end - 2.0, kind, next_kind) - - return transition_events - - def _apply_transition_density_rules(self, transition_events: List[Dict], - sections: List[Dict]) -> List[Dict]: - """ - Apply anti-overcrowding rules to transition events. - - Returns filtered list of events. - """ - if not transition_events: - return [] - - filtered = [] - last_event_time = {} # Track last time of each event type - section_fill_counts = defaultdict(int) # Track fills per section - - for event in transition_events: - fill_name = event.get('fill', '') - start = event.get('start', 0.0) - section_kind = event.get('section_kind', 'drop') - - # Rule 1: Max fills per section - max_fills = TRANSITION_DENSITY_RULES['max_fills_by_section'].get(section_kind, 2) - if section_fill_counts[section_kind] >= max_fills: - logger.debug("TRANSITION_DENSITY: Skipping '%s' - section '%s' at max (%d fills)", - fill_name, section_kind, max_fills) - continue - - # Rule 2: Minimum distance between same-type events - min_dist = TRANSITION_DENSITY_RULES['min_distance_same_type'].get(fill_name, 0) - if fill_name in last_event_time: - time_since_last = start - last_event_time[fill_name] - if time_since_last < min_dist: - logger.debug("TRANSITION_DENSITY: Skipping '%s' - too close to previous (%.1f < %.1f)", - fill_name, time_since_last, min_dist) - continue - - # Rule 3: Check for exclusive events at same position - skip = False - for existing in filtered: - if abs(existing.get('start', -999) - start) < 0.5: # Same position - for exclusive_set in TRANSITION_DENSITY_RULES['exclusive_events']: - if fill_name in exclusive_set and 
existing.get('fill') in exclusive_set: - logger.debug("TRANSITION_DENSITY: Skipping '%s' - exclusive with '%s' at %.1f", - fill_name, existing.get('fill'), start) - skip = True - break - if skip: - break - - if skip: - continue - - # Event passes all rules - filtered.append(event) - last_event_time[fill_name] = start - section_fill_counts[section_kind] += 1 - - logger.info("TRANSITION_DENSITY: %d events passed filtering (from %d original)", - len(filtered), len(transition_events)) - - return filtered - - def _transition_events_to_notes(self, transition_events: List[Dict]) -> List[Dict]: - """Convert filtered transition events to MIDI notes.""" - notes = [] - for event in transition_events: - fill_name = event.get('fill', '') - start = event.get('start', 0.0) - fill_notes, _ = self._generate_fill_pattern(fill_name, start) - notes.extend(fill_notes) - return notes - - def _materialize_transition_events(self, config: Dict[str, Any], - track_blueprints: List[Dict]) -> List[Dict]: - """ - Materialize transition events into track blueprints. - - Adds actual MIDI notes to transition-oriented tracks based on transition_events config. 
- """ - transition_events = config.get('transition_events', []) - if not transition_events: - config['transition_materialization'] = { - 'events_count': 0, - 'materialized': False, - 'note_count': 0, - 'track_roles': [], - } - return track_blueprints - - transition_track_targets = { - 'drum_fill_4bar': 'snare_fill', - 'drum_fill_2bar': 'snare_fill', - 'snare_roll': 'snare_fill', - 'hat_open_build': 'riser', - 'kick_drop': 'impact', - 'crash_impact': 'crash', - } - pitch_to_track_role = { - 36: 'kick', - 38: 'snare_fill', - 42: 'hat_closed', - 46: 'hat_open', - 49: 'crash', - 50: 'perc', - 51: 'ride', - } - - # Build a lookup dict of tracks by role - tracks_by_role = {} - for track in track_blueprints: - role = track.get('role', '') - if role: - tracks_by_role[role] = track - - # Track what was materialized - materialized_count = 0 - materialized_track_roles: set = set() - - # Materialize each transition event - for event in transition_events: - fill_name = event.get('fill', '') - fill_start = event.get('start', 0.0) - fill_notes = event.get('notes', []) - - if not fill_notes: - event['materialized'] = False - event['materialized_notes_count'] = 0 - event['materialized_track_roles'] = [] - continue - - preferred_track_role = transition_track_targets.get(fill_name) - preferred_note_map: Dict[str, List[Dict[str, Any]]] = {} - if preferred_track_role and preferred_track_role in tracks_by_role: - preferred_note_map[preferred_track_role] = list(fill_notes) - - fallback_note_map: Dict[str, List[Dict[str, Any]]] = {} - for note in fill_notes: - note_role = pitch_to_track_role.get(int(note.get('pitch', 0))) - if note_role: - fallback_note_map.setdefault(note_role, []).append(note) - - # Add notes to appropriate tracks - event_materialized_count = 0 - event_track_roles: set = set() - - for notes_by_track_role in [preferred_note_map, fallback_note_map]: - if not notes_by_track_role: - continue - - for track_role, notes_to_add in notes_by_track_role.items(): - if track_role 
not in tracks_by_role: - logger.debug("TRANSITION_MATERIALIZATION: No track for role '%s', skipping %d notes", - track_role, len(notes_to_add)) - continue - if track_role in event_track_roles: - continue - - track = tracks_by_role[track_role] - clips = track.get('clips', []) - - for clip in clips: - clip_scene_index = clip.get('scene_index', -1) - sections = config.get('sections', []) - if clip_scene_index < 0 or clip_scene_index >= len(sections): - continue - - section = sections[clip_scene_index] - section_start = float(section.get('start', 0.0)) - section_beats = float(section.get('beats', 0.0)) - - if section_start <= fill_start < section_start + section_beats: - existing_notes = clip.get('notes', []) - adjusted_notes = [] - for note in notes_to_add: - adjusted_note = dict(note) - adjusted_note['start'] = note['start'] - section_start - adjusted_notes.append(adjusted_note) - - existing_notes.extend(adjusted_notes) - existing_notes.sort(key=lambda item: (float(item.get('start', 0.0)), int(item.get('pitch', 0)))) - clip['notes'] = existing_notes - materialized_count += len(adjusted_notes) - event_materialized_count += len(adjusted_notes) - materialized_track_roles.add(track_role) - event_track_roles.add(track_role) - - logger.debug("TRANSITION_MATERIALIZATION: Added %d notes to track '%s' (role: %s) for fill '%s' at %.1f", - len(adjusted_notes), track.get('name', ''), track_role, fill_name, fill_start) - break - - if event_materialized_count > 0: - break - - event['materialized'] = event_materialized_count > 0 - event['materialized_notes_count'] = event_materialized_count - event['materialized_track_roles'] = sorted(event_track_roles) - - logger.info("TRANSITION_MATERIALIZATION: Total %d notes materialized across all tracks", materialized_count) - config['transition_materialization'] = { - 'events_count': len(transition_events), - 'materialized': materialized_count > 0, - 'note_count': materialized_count, - 'track_roles': sorted(materialized_track_roles), - } - 
return track_blueprints - - def _find_reference_track_profile(self) -> Optional[Dict[str, Any]]: - matches: List[Tuple[float, Dict[str, Any]]] = [] - audio_extensions = {'.wav', '.mp3', '.aiff', '.flac', '.aif', '.ogg'} - for directory in REFERENCE_SEARCH_DIRS: - if not directory.exists(): - continue - for candidate in sorted(directory.glob('*')): - if not candidate.is_file(): - continue - if candidate.suffix.lower() not in audio_extensions: - continue - normalized_name = candidate.name.lower() - for profile in REFERENCE_TRACK_PROFILES: - if all(term in normalized_name for term in profile.get('match_terms', [])): - resolved = dict(profile) - resolved['path'] = str(candidate) - resolved['file_name'] = candidate.name - try: - modified = float(candidate.stat().st_mtime) - except Exception: - modified = 0.0 - matches.append((modified, resolved)) - - if not matches: - return None - matches.sort(key=lambda item: item[0], reverse=True) - return matches[0][1] - - def _resolve_reference_track_profile(self, genre: str, style: str, bpm: float, - key: str, structure: str, - reference_energy_profile: Optional[List[Dict[str, Any]]] = None) -> Optional[Dict[str, Any]]: - profile = self._find_reference_track_profile() - if not profile: - return None - - target_genre = profile.get('genre', '') - compatible_genres = {target_genre, 'techno', 'tech-house', 'house'} - if genre and genre not in compatible_genres: - return None - - if bpm <= 0: - bpm = float(profile.get('bpm', bpm or 0)) - if not key: - key = profile.get('key', key) - if not style: - style = profile.get('style', style) - if not structure or structure == 'standard': - structure = profile.get('structure', structure or 'standard') - - result = { - 'genre': target_genre or genre, - 'style': style, - 'bpm': bpm, - 'key': key, - 'structure': structure, - 'reference': profile, - } - - # Forward energy profile if available - if reference_energy_profile: - result['reference_energy_profile'] = reference_energy_profile - - return 
result - - def _build_return_states(self, returns: List[Dict[str, Any]], section: Dict[str, Any]) -> List[Dict[str, Any]]: - if not returns: - return [] - - kind = str(section.get('kind', 'drop')).lower() - energy = max(1, int(section.get('energy', 1))) - profile_name = str(self._current_generation_profile.get('name', 'default')).lower() - style_text = str(self._current_generation_profile.get('style_text', '')).lower() - - volume_factors = { - 'space': {'intro': 0.94, 'build': 0.84, 'drop': 0.7, 'break': 1.02, 'outro': 0.86}, - 'echo': {'intro': 0.8, 'build': 1.04, 'drop': 0.72, 'break': 0.92, 'outro': 0.78}, - 'heat': {'intro': 0.56, 'build': 0.88, 'drop': 1.06, 'break': 0.42, 'outro': 0.66}, - 'glue': {'intro': 0.72, 'build': 0.86, 'drop': 1.02, 'break': 0.58, 'outro': 0.74}, - } - space_mix = {'intro': 0.94, 'build': 0.88, 'drop': 0.8, 'break': 1.0, 'outro': 0.9} - echo_mix = {'intro': 0.72, 'build': 0.92, 'drop': 0.62, 'break': 0.84, 'outro': 0.76} - width_targets = {'intro': 1.02, 'build': 1.08, 'drop': 1.12, 'break': 1.16, 'outro': 1.04} - filter_factors = {'intro': 0.86, 'build': 1.0, 'drop': 1.18, 'break': 0.78, 'outro': 0.9} - drive_offsets = {'intro': -1.2, 'build': 0.2, 'drop': 1.0, 'break': -1.6, 'outro': -0.5} - threshold_offsets = {'intro': 1.5, 'build': -0.5, 'drop': -2.0, 'break': 2.5, 'outro': 1.0} - - states = [] - for return_index, return_spec in enumerate(returns): - send_key = str(return_spec.get('send_key', return_spec.get('name', ''))).strip().lower() - if not send_key: - continue - - base_volume = float(return_spec.get('volume', 0.7)) - volume_factor = volume_factors.get(send_key, {}).get(kind, 1.0) - if send_key in ['heat', 'glue'] and energy >= 4: - volume_factor += 0.04 - elif send_key in ['space', 'echo'] and kind == 'break': - volume_factor += 0.04 - - if profile_name == 'warehouse' and send_key == 'heat': - volume_factor += 0.05 - elif profile_name == 'festival' and send_key == 'space': - volume_factor += 0.06 - elif profile_name == 
'swing' and send_key == 'echo': - volume_factor += 0.05 - elif profile_name == 'jackin' and send_key == 'glue': - volume_factor += 0.05 - - if 'industrial' in style_text and send_key == 'heat': - volume_factor += 0.05 - if 'latin' in style_text and send_key == 'echo': - volume_factor += 0.06 - - state = { - 'return_index': return_index, - 'send_key': send_key, - 'volume': self._clamp_unit(base_volume * volume_factor), - 'device_parameters': [], - } - - for device_index, device_spec in enumerate(return_spec.get('device_chain', []) or []): - if not isinstance(device_spec, dict): - continue - device_name = str(device_spec.get('device', '') or '').strip() - if not device_name: - continue - device_name_lower = device_name.lower() - base_parameters = dict(device_spec.get('parameters', {})) - parameter_updates = {} - - if send_key == 'space': - if 'hybrid reverb' in device_name_lower: - parameter_updates['Dry/Wet'] = space_mix.get(kind, 0.9) - elif 'auto filter' in device_name_lower: - base_frequency = float(base_parameters.get('Frequency', 8200.0) or 8200.0) - parameter_updates['Frequency'] = round(base_frequency * filter_factors.get(kind, 1.0), 3) - parameter_updates['Dry/Wet'] = {'intro': 0.18, 'build': 0.22, 'drop': 0.08, 'break': 0.28, 'outro': 0.14}.get(kind, 0.16) - elif 'utility' in device_name_lower: - parameter_updates['Stereo Width'] = width_targets.get(kind, 1.08) - elif send_key == 'echo': - if 'echo' in device_name_lower: - parameter_updates['Dry/Wet'] = echo_mix.get(kind, 0.78) - elif 'auto filter' in device_name_lower: - base_frequency = float(base_parameters.get('Frequency', 9800.0) or 9800.0) - parameter_updates['Frequency'] = round(base_frequency * {'intro': 0.94, 'build': 1.08, 'drop': 0.88, 'break': 0.9, 'outro': 0.92}.get(kind, 1.0), 3) - parameter_updates['Dry/Wet'] = {'intro': 0.08, 'build': 0.14, 'drop': 0.06, 'break': 0.16, 'outro': 0.09}.get(kind, 0.1) - elif 'hybrid reverb' in device_name_lower: - parameter_updates['Dry/Wet'] = {'intro': 0.12, 
'build': 0.18, 'drop': 0.08, 'break': 0.22, 'outro': 0.1}.get(kind, 0.12) - elif send_key == 'heat': - if 'saturator' in device_name_lower: - base_drive = float(base_parameters.get('Drive', 4.5) or 4.5) - parameter_updates['Drive'] = round(max(0.5, base_drive + drive_offsets.get(kind, 0.0)), 3) - elif 'compressor' in device_name_lower: - base_threshold = float(base_parameters.get('Threshold', -16.0) or -16.0) - parameter_updates['Threshold'] = round(base_threshold + threshold_offsets.get(kind, 0.0), 3) - elif send_key == 'glue': - if 'compressor' in device_name_lower: - base_threshold = float(base_parameters.get('Threshold', -18.0) or -18.0) - parameter_updates['Threshold'] = round(base_threshold + {'intro': 1.0, 'build': -0.6, 'drop': -1.4, 'break': 1.8, 'outro': 0.8}.get(kind, 0.0), 3) - elif 'limiter' in device_name_lower: - parameter_updates['Gain'] = {'intro': -0.4, 'build': 0.0, 'drop': 0.35, 'break': -0.6, 'outro': -0.3}.get(kind, 0.0) - - for parameter_name, value in parameter_updates.items(): - state['device_parameters'].append({ - 'device_index': int(device_index), - 'device_name': device_name, - 'parameter': parameter_name, - 'value': value, - }) - - states.append(state) - - return states - -# ========================================================================= - # SECTION AUTOMATION METHODS - # ========================================================================= - - def _generate_automation_envelope( - self, - parameter_start: float, - parameter_end: float, - section_length: float, - curve_name: str = 'linear', - num_points: int = 8 - ) -> List[Dict[str, Any]]: - """ - Generate automation envelope points for a parameter over a section. 
- - Args: - parameter_start: Starting value of the parameter - parameter_end: Ending value of the parameter - section_length: Length of the section in beats - curve_name: Name of the envelope curve to use - num_points: Number of envelope points to generate - - Returns: - List of automation points with time and value - """ - curve_func = ENVELOPE_CURVES.get(curve_name, ENVELOPE_CURVES['linear']) - envelope_points = [] - - for i in range(num_points): - position = i / (num_points - 1) if num_points > 1 else 0.0 - curved_position = curve_func(position) - value = parameter_start + (parameter_end - parameter_start) * curved_position - time = section_length * position - - envelope_points.append({ - 'time': round(time, 3), - 'value': round(value, 4), - 'curve_position': round(position, 3), - }) - - return envelope_points - - def _build_section_automation( - self, - section: Dict[str, Any], - buses: List[Dict[str, Any]], - returns: List[Dict[str, Any]] - ) -> Dict[str, Any]: - """ - Build automation data for a single section. 
- - Args: - section: Section configuration dictionary - buses: List of bus track configurations - returns: List of return track configurations - - Returns: - Dictionary containing automation data for the section - """ - kind = str(section.get('kind', 'drop')).lower() - section_length = float(section.get('beats', 32.0)) - energy = float(section.get('energy', 1)) - - # Get base automation template for this section kind - base_automation = SECTION_AUTOMATION.get(kind, SECTION_AUTOMATION.get('drop', {})) - - # Determine envelope curve - curve_name = base_automation.get('envelope_curve', 'linear') - - # Apply energy scaling - energy_factor = max(0.5, min(1.5, energy / 3.0)) - - automation_data = { - 'section_index': int(section.get('index', 0)), - 'section_name': section.get('name', 'SECTION'), - 'section_kind': kind, - 'section_length': section_length, - 'energy': round(base_automation.get('energy', 0.5) * energy_factor, 3), - 'bus_automation': [], - 'return_automation': [], - 'master_automation': {}, - } - - # Build bus automation - for bus in buses: - bus_key = str(bus.get('key', '')).lower() - if not bus_key: - continue - - bus_filter_settings = base_automation.get('filters', {}).get(bus_key, {}) - if not bus_filter_settings: - continue - - bus_auto = { - 'bus_key': bus_key, - 'bus_name': bus.get('name', bus_key.upper()), - 'parameters': [] - } - - # Filter frequency automation - if 'frequency' in bus_filter_settings: - freq_start = bus_filter_settings['frequency'] * (1.1 - energy_factor * 0.2) - freq_end = bus_filter_settings['frequency'] * energy_factor - bus_auto['parameters'].append({ - 'device': 'Auto Filter', - 'parameter': 'Frequency', - 'envelope': self._generate_automation_envelope( - freq_start, freq_end, section_length, curve_name - ), - 'start_value': round(freq_start, 1), - 'end_value': round(freq_end, 1), - }) - - # Filter resonance automation - if 'resonance' in bus_filter_settings: - res_start = bus_filter_settings['resonance'] * 0.8 - res_end = 
bus_filter_settings['resonance'] * energy_factor - bus_auto['parameters'].append({ - 'device': 'Auto Filter', - 'parameter': 'Resonance', - 'envelope': self._generate_automation_envelope( - res_start, res_end, section_length, 'ease_in_out' - ), - 'start_value': round(res_start, 3), - 'end_value': round(res_end, 3), - }) - - if bus_auto['parameters']: - automation_data['bus_automation'].append(bus_auto) - - # Build return automation - reverb_settings = base_automation.get('reverb', {}) - delay_settings = base_automation.get('delay', {}) - compression_settings = base_automation.get('compression', {}) - saturation_settings = base_automation.get('saturation', {}) - stereo_width_settings = base_automation.get('stereo_width', {}) - - for return_track in returns: - send_key = str(return_track.get('send_key', '')).lower() - if not send_key: - continue - - return_auto = { - 'send_key': send_key, - 'return_name': return_track.get('name', send_key.upper()), - 'parameters': [] - } - - if send_key == 'space' and reverb_settings: - # Reverb send level - return_auto['parameters'].append({ - 'device': 'Hybrid Reverb', - 'parameter': 'Dry/Wet', - 'envelope': self._generate_automation_envelope( - reverb_settings.get('send_level', 0.2) * 0.9, - reverb_settings.get('send_level', 0.2) * energy_factor, - section_length, curve_name - ), - 'start_value': round(reverb_settings.get('send_level', 0.2) * 0.9, 3), - 'end_value': round(reverb_settings.get('send_level', 0.2) * energy_factor, 3), - }) - # Decay time - return_auto['parameters'].append({ - 'device': 'Hybrid Reverb', - 'parameter': 'Decay Time', - 'envelope': self._generate_automation_envelope( - reverb_settings.get('decay_time', 2.0) * 0.85, - reverb_settings.get('decay_time', 2.0), - section_length, 'ease_out' - ), - 'start_value': round(reverb_settings.get('decay_time', 2.0) * 0.85, 2), - 'end_value': round(reverb_settings.get('decay_time', 2.0), 2), - }) - - elif send_key == 'echo' and delay_settings: - # Delay send level - 
return_auto['parameters'].append({ - 'device': 'Echo', - 'parameter': 'Dry/Wet', - 'envelope': self._generate_automation_envelope( - delay_settings.get('send_level', 0.15) * 0.85, - delay_settings.get('send_level', 0.15) * energy_factor, - section_length, curve_name - ), - 'start_value': round(delay_settings.get('send_level', 0.15) * 0.85, 3), - 'end_value': round(delay_settings.get('send_level', 0.15) * energy_factor, 3), - }) - # Feedback - return_auto['parameters'].append({ - 'device': 'Echo', - 'parameter': 'Feedback', - 'envelope': self._generate_automation_envelope( - delay_settings.get('feedback', 0.3) * 0.8, - delay_settings.get('feedback', 0.3), - section_length, 'ramp_up' - ), - 'start_value': round(delay_settings.get('feedback', 0.3) * 0.8, 3), - 'end_value': round(delay_settings.get('feedback', 0.3), 3), - }) - - elif send_key == 'heat' and saturation_settings: - # Saturation drive - return_auto['parameters'].append({ - 'device': 'Saturator', - 'parameter': 'Drive', - 'envelope': self._generate_automation_envelope( - saturation_settings.get('drive', 2.0) * 0.6, - saturation_settings.get('drive', 2.0) * energy_factor, - section_length, 'ramp_up' - ), - 'start_value': round(saturation_settings.get('drive', 2.0) * 0.6, 2), - 'end_value': round(saturation_settings.get('drive', 2.0) * energy_factor, 2), - }) - - elif send_key == 'glue' and compression_settings: - # Compressor threshold - return_auto['parameters'].append({ - 'device': 'Compressor', - 'parameter': 'Threshold', - 'envelope': self._generate_automation_envelope( - compression_settings.get('threshold', -12.0) + 3, - compression_settings.get('threshold', -12.0) - (energy_factor - 1) * 2, - section_length, 'ease_in' - ), - 'start_value': round(compression_settings.get('threshold', -12.0) + 3, 1), - 'end_value': round(compression_settings.get('threshold', -12.0) - (energy_factor - 1) * 2, 1), - }) - - if return_auto['parameters']: - automation_data['return_automation'].append(return_auto) - - # Build 
master automation - automation_data['master_automation'] = { - 'stereo_width': { - 'parameter': 'Stereo Width', - 'envelope': self._generate_automation_envelope( - stereo_width_settings.get('value', 1.0) * 0.9, - stereo_width_settings.get('value', 1.0), - section_length, 'ease_in_out' - ), - 'start_value': round(stereo_width_settings.get('value', 1.0) * 0.9, 3), - 'end_value': round(stereo_width_settings.get('value', 1.0), 3), - }, - 'compression': { - 'parameter': 'Ratio', - 'envelope': self._generate_automation_envelope( - compression_settings.get('ratio', 2.0) * 0.8, - compression_settings.get('ratio', 2.0) * energy_factor, - section_length, 'ease_in' - ), - 'start_value': round(compression_settings.get('ratio', 2.0) * 0.8, 2), - 'end_value': round(compression_settings.get('ratio', 2.0) * energy_factor, 2), - }, - } - - return automation_data - - def _build_full_automation_blueprint( - self, - sections: List[Dict[str, Any]], - buses: List[Dict[str, Any]], - returns: List[Dict[str, Any]] - ) -> List[Dict[str, Any]]: - """ - Build complete automation blueprint for all sections. - - Args: - sections: List of section configurations - buses: List of bus track configurations - returns: List of return track configurations - - Returns: - List of automation data dictionaries, one per section - """ - automation_blueprint = [] - - for section in sections: - section_automation = self._build_section_automation(section, buses, returns) - automation_blueprint.append(section_automation) - - return automation_blueprint - - def _build_master_state(self, section_kind: str) -> Dict[str, Any]: - """ - Build master chain state for a section. - - Returns a snapshot payload with flat device parameters for master chain. 
- """ - section = section_kind.lower() - device_parameters = [] - for device_name, parameter_map in MASTER_DEVICE_AUTOMATION.items(): - for parameter_name, section_values in parameter_map.items(): - value = section_values.get(section, section_values.get('drop', 0.0)) - clamp = MASTER_SAFETY_CLAMPS.get(parameter_name) - if clamp: - value = max(clamp['min'], min(clamp['max'], float(value))) - device_parameters.append({ - 'device_name': device_name, - 'parameter': parameter_name, - 'value': round(float(value), 3), - }) - - return { - 'section': section, - 'device_parameters': device_parameters, - } - - def _build_device_parameters_for_role(self, role: str, section_kind: str) -> List[Dict[str, Any]]: - """ - Build flat device parameter automation entries for a track role in a section. - """ - role_lower = role.lower().replace(' ', '_').replace('-', '_') - if role_lower not in SECTION_DEVICE_AUTOMATION: - return [] - section = section_kind.lower() - device_params = [] - for device_name, parameter_map in SECTION_DEVICE_AUTOMATION.get(role_lower, {}).items(): - for parameter_name, section_values in parameter_map.items(): - value = section_values.get(section, section_values.get('drop', 0.0)) - clamp = DEVICE_PARAMETER_SAFETY_CLAMPS.get(parameter_name) - if clamp: - value = max(clamp['min'], min(clamp['max'], float(value))) - device_params.append({ - 'device_name': device_name, - 'parameter': parameter_name, - 'value': round(float(value), 3), - }) - return device_params - - def _build_bus_device_parameters(self, bus_key: str, section_kind: str) -> List[Dict[str, Any]]: - """ - Build flat device parameter automation entries for a bus track in a section. - Uses BUS_DEVICE_AUTOMATION constant for per-section values. 
- """ - bus_key_lower = bus_key.lower() - if bus_key_lower not in BUS_DEVICE_AUTOMATION: - return [] - section = section_kind.lower() - device_params = [] - for device_name, parameter_map in BUS_DEVICE_AUTOMATION.get(bus_key_lower, {}).items(): - for parameter_name, section_values in parameter_map.items(): - value = section_values.get(section, section_values.get('drop',0.0)) - clamp = DEVICE_PARAMETER_SAFETY_CLAMPS.get(parameter_name) - if clamp: - value = max(clamp['min'], min(clamp['max'], float(value))) - device_params.append({ - 'device_name': device_name, - 'parameter': parameter_name, - 'value': round(float(value), 3), - }) - return device_params - - def _build_performance_snapshots(self, blueprint_tracks: List[Dict[str, Any]], - sections: List[Dict[str, Any]], - returns: Optional[List[Dict[str, Any]]] = None, - buses: Optional[List[Dict[str, Any]]] = None, - reference_energy_profile: Optional[List[Dict[str, Any]]] = None) -> List[Dict[str, Any]]: - performance = [] - stereo_roles = {'hat_closed', 'hat_open', 'top_loop', 'perc', 'ride', 'pad', 'pluck', 'arp', 'counter', 'reverse_fx', 'riser', 'impact', 'atmos', 'vocal'} - profile_pan_width = float(self._current_generation_profile.get('pan_width', 0.12)) - volume_factors = { - 'intro': 0.86, - 'build': 0.94, - 'drop': 1.02, - 'break': 0.78, - 'outro': 0.8, - } - - # Build energy profile lookup by section index for adaptive mixing - energy_by_index = {} - if reference_energy_profile: - for i, ep in enumerate(reference_energy_profile): - energy_by_index[i] = ep.get('energy_mean', 0.5) - else: - # Fallback: use section features if available - for i, section in enumerate(sections): - features = section.get('features', {}) - energy_by_index[i] = features.get('energy_mean', features.get('energy', 0.5)) - - space_send_factors = { - 'intro': 1.15, - 'build': 1.0, - 'drop': 0.82, - 'break': 1.35, - 'outro': 1.05, - } - echo_send_factors = { - 'intro': 1.08, - 'build': 1.18, - 'drop': 0.78, - 'break': 1.45, - 'outro': 
0.95, - } - heat_send_factors = { - 'intro': 0.55, - 'build': 0.92, - 'drop': 1.18, - 'break': 0.42, - 'outro': 0.72, - } - glue_send_factors = { - 'intro': 0.72, - 'build': 0.96, - 'drop': 1.08, - 'break': 0.58, - 'outro': 0.78, - } - - for section_idx, section in enumerate(sections): - kind = str(section.get('kind', 'drop')).lower() - energy = max(1, int(section.get('energy', 1))) - - # Get energy_mean from reference profile for adaptive volume scaling - ref_energy_mean = energy_by_index.get(section_idx, 0.5) - - snapshot = { - 'scene_index': int(section.get('index', len(performance))), - 'name': section.get('name', "SECTION"), - 'track_states': [], - 'return_states': self._build_return_states(list(returns or []), section), - 'bus_states': [], - } - - for track_index, track_data in enumerate(blueprint_tracks): - role = track_data.get('role', '') - base_volume = float(track_data.get('volume', 0.72)) - base_pan = float(track_data.get('pan', 0.0)) - base_sends = dict(track_data.get('sends', {})) - intensity = self._role_intensity(role, section) - is_muted = role != 'sc_trigger' and intensity <= 0 - - if is_muted: - target_volume = round(base_volume * 0.08, 3) - else: - factor = volume_factors.get(kind, 1.0) + max(0.0, (energy - 3) * 0.03) - if role in ['kick', 'sub_bass', 'bass'] and kind == 'drop': - factor += 0.04 - if role in ['pad', 'atmos', 'drone'] and kind == 'break': - factor += 0.08 - if role in ['reverse_fx', 'riser', 'impact'] and kind in ['build', 'break']: - factor += 0.06 * float(self._current_generation_profile.get('fx_bias', 1.0)) - - # Apply energy-based volume scaling from reference profile - if ref_energy_mean < 0.3: - # Quiet sections (intro, quiet breaks) - reduce volume - energy_volume_factor = 0.85 - elif ref_energy_mean > 0.7: - # High energy sections (drops, peaks) - boost volume - energy_volume_factor = 1.08 - else: - energy_volume_factor = 1.0 - - target_volume = round(min(1.0, max(0.0, base_volume * factor * energy_volume_factor)), 3) - - 
target_pan = base_pan - pan_variant = str(section.get('pan_variant', 'narrow')).lower() - if role in stereo_roles: - if pan_variant == 'tilt_left': - direction = -1.0 - width = profile_pan_width - elif pan_variant == 'tilt_right': - direction = 1.0 - width = profile_pan_width - elif pan_variant == 'wide': - direction = -1.0 if track_index % 2 == 0 else 1.0 - width = profile_pan_width * 1.1 - else: - direction = -1.0 if track_index % 2 == 0 else 1.0 - width = profile_pan_width * 0.55 - - if kind == 'break': - width *= 1.18 - elif kind == 'drop': - width *= 0.92 - target_pan = self._clamp_pan(base_pan + (direction * width)) - - target_sends = {} - for send_name, send_value in base_sends.items(): - send_factor = 1.0 - if send_name == 'space': - send_factor = space_send_factors.get(kind, 1.0) - elif send_name == 'echo': - send_factor = echo_send_factors.get(kind, 1.0) - elif send_name == 'heat': - send_factor = heat_send_factors.get(kind, 1.0) - elif send_name == 'glue': - send_factor = glue_send_factors.get(kind, 1.0) - - if role in ['riser', 'impact'] and kind in ['build', 'break']: - send_factor += 0.18 - if role == 'vocal' and kind in ['build', 'drop']: - send_factor += 0.12 - if role in ['kick', 'sub_bass', 'bass'] and send_name in ['heat', 'glue'] and kind == 'drop': - send_factor += 0.1 - if is_muted: - send_factor *= 0.25 - - target_sends[send_name] = round(min(1.0, max(0.0, float(send_value) * send_factor)), 3) - - track_state = { - 'track_index': track_index, - 'role': role, - 'mute': is_muted, - 'volume': target_volume, - 'pan': target_pan, - 'sends': target_sends, - } - - # Add device_parameters to track state - device_params = self._build_device_parameters_for_role(role, kind) - if device_params: - track_state['device_parameters'] = device_params - - snapshot['track_states'].append(track_state) - - # Add bus states to snapshot - for bus_data in list(buses or []): - bus_key = str(bus_data.get('key', '')).lower() - if not bus_key: - continue - 
bus_device_params = self._build_bus_device_parameters(bus_key, kind) - if bus_device_params: - bus_state = { - 'bus_key': bus_key, - 'bus_name': bus_data.get('name', bus_key.upper()), - 'device_parameters': bus_device_params, - } - snapshot['bus_states'].append(bus_state) - - # Add master state to snapshot - master_state = self._build_master_state(kind) - if master_state.get('device_parameters'): - snapshot['master_state'] = master_state - - performance.append(snapshot) - - return performance - - def _build_mix_automation_summary(self, performance: List[Dict]) -> Dict[str, Any]: - """ - Build summary of automation in performance snapshots. - - Returns: - - track_snapshots_with_device_automation: count - - return_snapshots_with_device_automation: count - - bus_snapshots_with_device_automation: count - - master_snapshots_count: count - - track_roles_touched: list of roles with device automation - - bus_keys_touched: list of bus keys with device automation - - master_parameters_touched: list of master params automated - """ - track_count = 0 - return_count = 0 - bus_count = 0 - master_count = 0 - track_roles = set() - bus_keys = set() - master_params = set() - - for snapshot in performance: - # Check track states - for track_state in snapshot.get('track_states', []): - if 'device_parameters' in track_state and track_state['device_parameters']: - track_count += 1 - role = track_state.get('role', 'unknown') - track_roles.add(role) - - # Check return states - for return_state in snapshot.get('return_states', []): - if 'device_parameters' in return_state and return_state['device_parameters']: - return_count += 1 - - # Check bus states - for bus_state in snapshot.get('bus_states', []): - if 'device_parameters' in bus_state and bus_state['device_parameters']: - bus_count += 1 - bus_key = bus_state.get('bus_key', 'unknown') - bus_keys.add(bus_key) - - # Check master state - master_state = snapshot.get('master_state', {}) - if master_state.get('device_parameters'): - 
master_count += 1 - for item in master_state.get('device_parameters', []): - param_name = str(item.get('parameter', '') or '').strip() - if param_name: - master_params.add(param_name) - - return { - 'track_snapshots_with_device_automation': track_count, - 'return_snapshots_with_device_automation': return_count, - 'bus_snapshots_with_device_automation': bus_count, - 'master_snapshots_count': master_count, - 'track_roles_touched': sorted(list(track_roles)), - 'bus_keys_touched': sorted(list(bus_keys)), - 'master_parameters_touched': sorted(list(master_params)) - } - - def _verify_automation_safety(self, performance: List[Dict]) -> List[str]: - """ - Verify automation values are within safe ranges. - - Returns list of warnings if any values are outside safe ranges. - """ - warnings = [] - - for i, snapshot in enumerate(performance): - # Check master state - master_state = snapshot.get('master_state', {}) - for item in master_state.get('device_parameters', []): - device_name = str(item.get('device_name', 'unknown')) - param_name = str(item.get('parameter', '') or '').strip() - value = float(item.get('value', 0.0)) - clamp = MASTER_SAFETY_CLAMPS.get(param_name) - if clamp and (value < clamp['min'] or value > clamp['max']): - warnings.append(f"Snapshot {i}: {device_name}.{param_name}={value} outside safe range [{clamp['min']}, {clamp['max']}]") - - return warnings - - def _build_gain_staging_summary(self, config: Dict[str, Any]) -> Dict[str, Any]: - """ - Build gain staging summary for the generated config. 
- """ - warnings = [] - - # Check bus volumes for extreme values - bus_volumes = self._calibrated_bus_volumes or {} - for bus_name, vol in bus_volumes.items(): - if vol > 0.9: - warnings.append(f"Bus {bus_name} volume > 0.9: {vol:.3f}") - - # Check master limiter gain - master = config.get('master', {}) - master_limiter_gain = 0.0 - for device in master.get('device_chain', []): - if device.get('device') == 'Limiter': - master_limiter_gain = device.get('parameters', {}).get('Gain', 0.0) - if master_limiter_gain > 1.0: - warnings.append(f"Master limiter gain > 1.0: {master_limiter_gain:.3f}") - - # Check track volumes - for track in config.get('tracks', []): - vol = track.get('volume', 0.0) - role = track.get('role', 'unknown') - if vol > 0.9: - warnings.append(f"Track {role} volume > 0.9: {vol:.3f}") - - return { - 'master_profile_used': getattr(self, '_master_profile_used', 'default'), - 'style_adjustments_applied': getattr(self, '_style_adjustments_applied', []), - 'bus_volumes': bus_volumes, - 'track_volume_overrides_count': getattr(self, '_gain_calibration_overrides_count', 0), - 'peak_reductions_applied_count': getattr(self, '_peak_reductions_count', 0), - 'headroom_target_db': TARGET_HEADROOM_DB, - 'warnings': warnings, - } - - def generate_config(self, genre: str, style: str = "", bpm: float = 0, - key: str = "", structure: str = "standard") -> Dict[str, Any]: - """ - Genera una configuración completa de track - - Args: - genre: Género musical - style: Sub-estilo - bpm: BPM (0 = auto) - key: Tonalidad ("" = auto) - structure: Tipo de estructura - """ - genre = genre.lower().replace(' ', '-') - style = style.lower() if style else "" - variant_seed = random.SystemRandom().randint(1000, 999999) - random.seed(variant_seed) - - # Decay pattern variant memory to allow reuse - _decay_pattern_variant_memory() - - # Reset gain staging counters - self._gain_calibration_overrides_count = 0 - self._peak_reductions_count = 0 - self._style_adjustments_applied = [] - 
self._calibrated_bus_volumes = {} - self._master_profile_used = 'default' - - reference_resolution = self._resolve_reference_track_profile(genre, style, bpm, key, structure) - if reference_resolution: - genre = reference_resolution.get('genre', genre) or genre - style = reference_resolution.get('style', style) - bpm = float(reference_resolution.get('bpm', bpm or 0)) - key = reference_resolution.get('key', key) - structure = reference_resolution.get('structure', structure) - - # Obtener configuración del género - genre_config = GENRE_CONFIGS.get(genre, GENRE_CONFIGS['techno']) - - # Determinar BPM - if bpm <= 0: - bpm = genre_config['default_bpm'] - - # Determinar key - if not key: - key = random.choice(genre_config['keys']) - - # Determinar estilo si no se especificó - if not style: - style = random.choice(genre_config['styles']) - - # Parsear key - _root_note = key[:-1] if len(key) > 1 else key # noqa: F841 - parsed when needed per section - is_minor = 'm' in key.lower() - scale = 'minor' if is_minor else 'major' - profile = self._build_arrangement_profile(genre, style, variant_seed) - profile['style_text'] = f"{genre} {style}".strip().lower() - profile['reference_name'] = str(((reference_resolution or {}).get('reference') or {}).get('name', '')).lower() - self._current_generation_profile = profile - sections = self._build_sections(structure, style, variant_seed, profile) - - # Crear configuración base - config = { - 'name': f"{genre.title()} {style.title()}", - 'bpm': bpm, - 'key': key, - 'scale': scale, - 'genre': genre, - 'style': style, - 'structure': structure, - 'variant_seed': variant_seed, - 'arrangement_profile': profile['name'], - 'reference_track': reference_resolution.get('reference') if reference_resolution else None, - 'reference_energy_profile': reference_resolution.get('reference_energy_profile') if reference_resolution else None, - 'auto_generate': True, - 'sections': sections, - 'buses': self._build_mix_bus_blueprint(profile, genre, style, 
reference_resolution), - 'returns': self._build_return_blueprint(profile, genre, style, reference_resolution), - 'master': self._build_master_blueprint(profile, genre, style, reference_resolution), - 'tracks': [], - } - - # Generar tracks según género - config['tracks'] = self._generate_tracks_for_genre(genre, style, key, scale, structure, sections, profile) - config['performance'] = self._build_performance_snapshots(config['tracks'], sections, config.get('returns', []), config.get('buses', [])) - config['mix_automation_summary'] = self._build_mix_automation_summary(config['performance']) - config['mix_automation_warnings'] = self._verify_automation_safety(config['performance']) - config['gain_staging_summary'] = self._build_gain_staging_summary(config) - config['automation'] = self._build_full_automation_blueprint(sections, config.get('buses', []), config.get('returns', [])) - config['transition_events'] = self._generate_transition_events(sections) - - # Apply density rules to prevent overcrowding - config['transition_events'] = self._apply_transition_density_rules(config['transition_events'], sections) - - # Materialize transition events into track blueprints - config['tracks'] = self._materialize_transition_events(config, config['tracks']) - - config['locators'] = self._build_locators(sections) - config['total_bars'] = sum(section['bars'] for section in sections) - config['total_beats'] = float(config['total_bars'] * 4) - - # Add section variants summary - config['section_variants'] = { - section.get('name', f'section_{i}'): { - 'kind': section.get('kind', 'unknown'), - 'drum_variant': section.get('drum_variant', 'straight'), - 'kick_variant': section.get('kick_variant', (section.get('drum_role_variants') or {}).get('kick', 'straight')), - 'clap_variant': section.get('clap_variant', (section.get('drum_role_variants') or {}).get('clap', 'straight')), - 'hat_closed_variant': section.get('hat_closed_variant', (section.get('drum_role_variants') or 
{}).get('hat_closed', 'straight')), - 'bass_variant': section.get('bass_variant', 'anchor'), - 'bass_bank_variant': section.get('bass_bank_variant', section.get('bass_variant', 'anchor')), - 'melodic_variant': section.get('melodic_variant', 'motif'), - 'melodic_bank_variant': section.get('melodic_bank_variant', section.get('melodic_variant', 'motif')), - 'transition_fill': section.get('transition_fill', 'none'), - } - for i, section in enumerate(sections) - } - - # Crear summary - config['summary'] = f""" -🎵 Track Generado: {config['name']} -♩ BPM: {bpm} -🎹 Key: {key} -🎨 Style: {style} -📊 Tracks: {len(config['tracks'])} -""" - if config.get('reference_track'): - config['summary'] += f"🔊 Reference: {config['reference_track'].get('name')}\n" - - return config - - def _build_locators(self, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - locators = [] - arrangement_time = 0.0 - for section in sections: - locators.append({ - 'scene_index': int(section.get('index', len(locators))), - 'name': section.get('name', 'SECTION'), - 'bars': int(section.get('bars', 8)), - 'color': int(section.get('color', 10)), - 'time_beats': round(arrangement_time, 3), - }) - arrangement_time += float(section.get('beats', 0.0) or 0.0) - return locators - - def _generate_tracks_for_genre(self, genre: str, style: str, key: str, - scale: str, structure: str, sections: List[Dict[str, Any]], - profile: Optional[Dict[str, Any]] = None) -> List[Dict]: - """Genera la configuración de tracks según el género""" - track_specs = [] - style_text = f"{genre} {style}".lower() - - track_specs.extend([ - ('SC TRIGGER', 'sc_trigger', TRACK_COLORS['technical'], 'operator'), - ('KICK', 'kick', TRACK_COLORS['kick'], 'operator'), - ('CLAP', 'clap', TRACK_COLORS['clap'], 'operator'), - ('SNARE FILL', 'snare_fill', TRACK_COLORS['snare'], 'operator'), - ('HAT CLOSED', 'hat_closed', TRACK_COLORS['hat'], 'operator'), - ('HAT OPEN', 'hat_open', TRACK_COLORS['hat'], 'operator'), - ('TOP LOOP', 'top_loop', 
TRACK_COLORS['hat'], 'operator'), - ('PERCUSSION', 'perc', TRACK_COLORS['perc'], 'operator'), - ('TOM FILL', 'tom_fill', TRACK_COLORS['perc'], 'operator'), - ('SUB BASS', 'sub_bass', TRACK_COLORS['bass'], 'operator'), - ('BASS', 'bass', TRACK_COLORS['bass'], 'operator'), - ('DRONE', 'drone', TRACK_COLORS['pad'], 'analog'), - ('CHORDS', 'chords', TRACK_COLORS['chords'], 'wavetable'), - ('STAB', 'stab', TRACK_COLORS['synth'], 'operator'), - ('PAD', 'pad', TRACK_COLORS['pad'], 'wavetable'), - ('ARP', 'arp', TRACK_COLORS['synth'], 'operator'), - ('LEAD', 'lead', TRACK_COLORS['synth'], 'wavetable'), - ('COUNTER', 'counter', TRACK_COLORS['synth'], 'operator'), - ('CRASH', 'crash', TRACK_COLORS['fx'], 'operator'), - ('REVERSE FX', 'reverse_fx', TRACK_COLORS['fx'], 'analog'), - ('RISER FX', 'riser', TRACK_COLORS['fx'], 'operator'), - ('IMPACT FX', 'impact', TRACK_COLORS['fx'], 'operator'), - ('ATMOS', 'atmos', TRACK_COLORS['fx'], 'analog'), - ]) - tracks = [] - - # Synths/Chords según género - if genre in ['house', 'trance', 'progressive']: - tracks.append(self._generate_chord_track(key, scale, genre)) - tracks.append(self._generate_lead_track(key, scale, genre)) - elif genre in ['techno', 'tech-house']: - if random.random() > 0.3: # 70% de probabilidad - tracks.append(self._generate_chord_track(key, scale, genre)) - if random.random() > 0.5: - tracks.append(self._generate_lead_track(key, scale, genre)) - - # FX/Atmósfera para estructuras extended - if structure in ['extended', 'club'] or random.random() > 0.6: - tracks.append(self._generate_fx_track()) - - if genre in ['techno', 'tech-house', 'trance']: - track_specs.insert(8, ('RIDE', 'ride', TRACK_COLORS['ride'], 'operator')) - if genre in ['house', 'tech-house', 'trance'] or 'latin' in style_text: - track_specs.insert(14, ('PLUCK', 'pluck', TRACK_COLORS['synth'], 'wavetable')) - track_specs.insert(15, ('VOCAL CHOP', 'vocal', TRACK_COLORS['vocal'], 'wavetable')) - elif genre == 'drum-and-bass': - track_specs = [ - 
('BREAK', 'kick', TRACK_COLORS['kick'], 'operator'), - ('SNARE', 'clap', TRACK_COLORS['snare'], 'operator'), - ('HATS', 'hat_closed', TRACK_COLORS['hat'], 'operator'), - ('PERCUSSION', 'perc', TRACK_COLORS['perc'], 'operator'), - ('SUB BASS', 'sub_bass', TRACK_COLORS['bass'], 'operator'), - ('REESE', 'bass', TRACK_COLORS['bass'], 'operator'), - ('PAD', 'pad', TRACK_COLORS['pad'], 'wavetable'), - ('ARP', 'arp', TRACK_COLORS['synth'], 'operator'), - ('LEAD', 'lead', TRACK_COLORS['synth'], 'wavetable'), - ('VOCAL', 'vocal', TRACK_COLORS['vocal'], 'wavetable'), - ('RISER FX', 'riser', TRACK_COLORS['fx'], 'operator'), - ('ATMOS', 'atmos', TRACK_COLORS['fx'], 'analog'), - ] - - blueprint_tracks = [] - active_profile = dict(profile or self._current_generation_profile or {'name': 'default'}) - # NTH-04: Resolve genre-specific colors when available - genre_palette = GENRE_COLOR_PALETTES.get(genre, {}) - for name, role, default_color, device in track_specs: - clips = self._build_scene_clips(role, genre, style, key, scale, sections) - if not clips: - continue - - mix_profile = dict(ROLE_MIX.get(role, {})) - mix_profile['sends'] = self._extend_parallel_sends(role, mix_profile.get('sends', {})) - mix_profile = self._shape_mix_profile(role, mix_profile, active_profile, style) - # NTH-04: Use genre-specific color if available, otherwise default - resolved_color = genre_palette.get(role, default_color) - track = { - 'name': name, - 'type': 'midi', - 'role': role, - 'bus': self._resolve_bus_for_role(role), - 'device': device, - 'color': resolved_color, - 'volume': mix_profile.get('volume', 0.72), - 'pan': mix_profile.get('pan', 0.0), - 'sends': dict(mix_profile.get('sends', {})), - 'fx_chain': self._shape_role_fx_chain(role, active_profile, style), - 'clips': clips, - } - track['clip'] = dict(clips[0]) - - # Agregar metadata de variación al blueprint - if role in SECTION_VARIATION_CONFIG: - track['section_variation'] = SECTION_VARIATION_CONFIG[role] - track['can_vary_by_section'] = 
True - - blueprint_tracks.append(track) - - return blueprint_tracks - - def _build_sections(self, structure: str, style: str = "", variant_seed: Optional[int] = None, - profile: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: - structure_key = structure.lower() - rng = random.Random(variant_seed) if variant_seed is not None else random - blueprint_options = SECTION_BLUEPRINT_VARIANTS.get(structure_key) - if blueprint_options: - if 'latin' in style and structure_key == 'club' and len(blueprint_options) > 1: - blueprint = rng.choice(blueprint_options[1:]) - else: - blueprint = rng.choice(blueprint_options) - else: - blueprint = SECTION_BLUEPRINTS.get(structure_key, SECTION_BLUEPRINTS['standard']) - sections = [] - style_text = style.lower() if style else "" - profile_name = str((profile or {}).get('name', 'default')).lower() - for index, (name, bars, color, kind, energy) in enumerate(blueprint): - if kind == 'intro': - drum_variants = ['straight', 'skip'] - bass_variants = ['anchor', 'pedal'] - melodic_variants = ['motif', 'response'] - elif kind == 'build': - drum_variants = ['shuffle', 'pressure', 'straight'] - bass_variants = ['bounce', 'syncopated'] - melodic_variants = ['lift', 'response'] - elif kind == 'break': - drum_variants = ['skip', 'shuffle'] - bass_variants = ['pedal', 'anchor'] - melodic_variants = ['drone', 'response'] - elif kind == 'outro': - drum_variants = ['straight', 'skip'] - bass_variants = ['anchor', 'pedal'] - melodic_variants = ['motif', 'descend'] - else: - drum_variants = ['straight', 'pressure', 'shuffle'] - bass_variants = ['syncopated', 'bounce', 'anchor'] - melodic_variants = ['lift', 'motif', 'descend'] - - swing_pool = [0.0, 0.015, 0.025] - if 'latin' in style_text or profile_name in ['jackin', 'swing']: - swing_pool.extend([0.035, 0.045, 0.055]) - - pan_variant = rng.choice(['narrow', 'wide', 'tilt_left', 'tilt_right']) - if kind in ['intro', 'outro'] and rng.random() > 0.5: - pan_variant = 'narrow' - if kind == 'break' 
and rng.random() > 0.4: - pan_variant = 'wide' - - section_data = { - 'index': index, - 'name': name, - 'bars': int(bars), - 'beats': float(bars * 4), - 'color': color, - 'kind': kind, - 'energy': int(energy), - 'density': round(min(1.35, max(0.68, 0.78 + (energy * 0.08) + rng.uniform(-0.08, 0.14))), 3), - 'swing': round(rng.choice(swing_pool), 3), - 'tension': int(min(5, max(1, energy + rng.choice([-1, 0, 0, 1])))), - 'drum_variant': rng.choice(drum_variants), - 'bass_variant': rng.choice(bass_variants), - 'melodic_variant': rng.choice(melodic_variants), - 'pan_variant': pan_variant, - 'transition_fill': rng.choice(['none', 'snare', 'tom', 'reverse', 'impact']), - } - sections.append(self._ensure_section_pattern_variants(section_data)) - # Check for excessive repetition and force variation if needed - sections = self._check_section_repetition(sections) - return sections - - def _role_intensity(self, role: str, section: Dict[str, Any]) -> int: - kind = section.get('kind', 'drop') - energy = int(section.get('energy', 1)) - role_energy = ROLE_ACTIVITY.get(role, {}).get(kind, 0) - return min(max(role_energy, 0), max(1, energy + 1)) - - def _build_scene_clips(self, role: str, genre: str, style: str, key: str, - scale: str, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - clips = [] - for section in sections: - notes = self._render_scene_notes(role, genre, style, key, scale, section) - if not notes: - continue - - clips.append({ - 'scene_index': section['index'], - 'length': section['beats'], - 'name': f"{role.upper()} - {section['name']}", - 'notes': notes, - }) - return clips - - def _render_scene_notes(self, role: str, genre: str, style: str, key: str, - scale: str, section: Dict[str, Any]) -> List[Dict[str, Any]]: - intensity = self._role_intensity(role, section) - if intensity <= 0: - return [] - - if role in ['sc_trigger', 'kick', 'clap', 'snare_fill', 'hat_closed', 'hat_open', 'top_loop', 'perc', 'tom_fill', 'ride', 'crash']: - return 
self._render_drum_scene(role, genre, style, section, intensity) - if role in ['sub_bass', 'bass']: - return self._render_bass_scene(role, genre, style, key, section) - if role in ['chords', 'stab', 'pad', 'pluck', 'arp', 'lead', 'counter']: - return self._render_musical_scene(role, genre, key, scale, section) - if role in ['drone', 'reverse_fx', 'riser', 'impact', 'atmos', 'vocal']: - return self._render_fx_scene(role, key, section) - return [] - - def _render_drum_scene(self, role: str, genre: str, style: str, - section: Dict[str, Any], intensity: int) -> List[Dict[str, Any]]: - total_length = float(section['beats']) - kind = section['kind'] - style_text = f"{genre} {style}".lower() - - if role == 'sc_trigger': - pattern = [self._make_note(24, beat, 0.12, 127) for beat in [0.0, 1.0, 2.0, 3.0]] - if kind == 'break': - pattern = [self._make_note(24, beat, 0.1, 118) for beat in [0.0, 2.0]] - return self._repeat_pattern(pattern, total_length, 4.0) - - if role == 'kick': - if genre == 'drum-and-bass': - pattern = [ - self._make_note(36, 0.0, 0.25, 122), - self._make_note(36, 0.75, 0.2, 104), - self._make_note(36, 1.5, 0.2, 112), - self._make_note(36, 2.0, 0.25, 124), - self._make_note(36, 2.75, 0.2, 100), - self._make_note(36, 3.25, 0.2, 92), - ] - elif kind == 'break': - pattern = [ - self._make_note(36, 0.0, 0.25, 118), - self._make_note(36, 2.0, 0.25, 110), - ] - else: - pattern = [self._make_note(36, beat, 0.25, 126 if beat == 0 else 118) for beat in [0.0, 1.0, 2.0, 3.0]] - if intensity >= 4 and genre in ['techno', 'tech-house']: - pattern.append(self._make_note(36, 3.5, 0.15, 94)) - notes = self._repeat_pattern(pattern, total_length, 4.0) - if kind in ['build', 'drop', 'outro']: - notes = self._merge_section_notes(notes, self._build_drum_fill(role, total_length, intensity), total_length) - return self._vary_drum_notes(notes, role, section, total_length) - - if role == 'clap': - pitch = 38 if genre == 'drum-and-bass' else 39 - if kind == 'intro': - pattern = 
[self._make_note(pitch, 3.0, 0.2, 88)] - elif kind == 'break': - pattern = [self._make_note(pitch, 1.0, 0.2, 84)] - else: - pattern = [ - self._make_note(pitch, 1.0, 0.25, 108), - self._make_note(pitch, 3.0, 0.25, 108), - ] - notes = self._repeat_pattern(pattern, total_length, 4.0) - if kind in ['build', 'drop']: - notes = self._merge_section_notes(notes, self._build_drum_fill(role, total_length, intensity), total_length) - return self._vary_drum_notes(notes, role, section, total_length) - - if role == 'snare_fill': - if kind not in ['build', 'break', 'drop']: - return [] - if str(section.get('transition_fill', 'snare')).lower() not in ['snare', 'impact'] and kind != 'drop': - return [] - fill_span = 2.0 if kind == 'build' and total_length >= 8.0 else 1.0 - fill_start = max(0.0, total_length - fill_span) - step = 0.25 if intensity <= 2 else 0.125 - velocity = 76 - notes = [] - current = fill_start - while current < total_length - 0.01: - notes.append(self._make_note(38, current, 0.08 if step < 0.2 else 0.12, min(124, velocity))) - current += step - velocity += 3 - if kind == 'drop': - notes.insert(0, self._make_note(38, 0.0, 0.15, 102)) - return self._vary_drum_notes(notes, role, section, total_length) - - if role == 'hat_closed': - if intensity <= 1: - pattern = [self._make_note(42, beat, 0.1, 86) for beat in [0.5, 1.5, 2.5, 3.5]] - elif intensity == 2: - pattern = [self._make_note(42, step * 0.5, 0.1, 90 if step % 2 == 0 else 72) for step in range(8)] - else: - pattern = [self._make_note(42, step * 0.5, 0.1, 92 if step % 2 == 0 else 74) for step in range(8)] - pattern.extend([self._make_note(42, 1.75, 0.08, 64), self._make_note(42, 3.75, 0.08, 62)]) - notes = self._repeat_pattern(pattern, total_length, 4.0) - if kind in ['build', 'drop', 'outro']: - notes = self._merge_section_notes(notes, self._build_drum_fill(role, total_length, intensity), total_length) - return self._vary_drum_notes(notes, role, section, total_length) - - if role == 'hat_open': - if kind in 
['intro', 'break'] and intensity <= 1: - return [] - pattern = [self._make_note(46, 3.5, 0.35, 82)] - if intensity >= 3: - pattern.append(self._make_note(46, 1.5, 0.25, 74)) - notes = self._repeat_pattern(pattern, total_length, 4.0) - if kind in ['build', 'drop']: - notes = self._merge_section_notes(notes, self._build_drum_fill(role, total_length, intensity), total_length) - return self._vary_drum_notes(notes, role, section, total_length) - - if role == 'top_loop': - if kind in ['intro', 'break'] and intensity <= 1: - return [] - pattern = [ - self._make_note(44, 0.25, 0.08, 56), - self._make_note(44, 0.75, 0.08, 62), - self._make_note(44, 1.25, 0.08, 58), - self._make_note(44, 1.75, 0.08, 66), - self._make_note(44, 2.25, 0.08, 58), - self._make_note(44, 2.75, 0.08, 64), - self._make_note(44, 3.25, 0.08, 60), - self._make_note(44, 3.75, 0.08, 68), - ] - if 'latin' in style_text: - pattern.extend([ - self._make_note(54, 0.5, 0.08, 52), - self._make_note(54, 2.5, 0.08, 54), - ]) - if intensity >= 3: - pattern.extend([ - self._make_note(44, 1.125, 0.06, 48), - self._make_note(44, 3.125, 0.06, 50), - ]) - return self._vary_drum_notes(self._repeat_pattern(pattern, total_length, 4.0), role, section, total_length) - - if role == 'perc': - if kind in ['intro', 'outro'] and intensity <= 1: - return [] - pattern = [ - self._make_note(37, 0.75, 0.1, 62), - self._make_note(37, 1.25, 0.1, 58), - self._make_note(37, 2.75, 0.1, 64), - self._make_note(50, 3.25, 0.12, 70), - ] - if 'latin' in style_text: - pattern.extend([ - self._make_note(64, 1.75, 0.12, 68), - self._make_note(64, 2.125, 0.12, 64), - ]) - if intensity >= 3: - pattern.extend([self._make_note(37, 0.25, 0.1, 56), self._make_note(47, 2.25, 0.1, 68)]) - return self._vary_drum_notes(self._repeat_pattern(pattern, total_length, 4.0), role, section, total_length) - - if role == 'tom_fill': - if kind not in ['build', 'drop']: - return [] - if str(section.get('transition_fill', 'tom')).lower() not in ['tom', 'impact'] and 
kind != 'drop': - return [] - fill_start = max(0.0, total_length - 1.0) - sequence = [47, 50, 45, 47, 50] - velocities = [72, 76, 80, 88, 96] - notes = [] - for index, pitch in enumerate(sequence): - start = fill_start + (index * 0.2) - if start >= total_length: - break - notes.append(self._make_note(pitch, start, 0.18, velocities[index])) - return self._vary_drum_notes(notes, role, section, total_length) - - if role == 'ride': - if kind not in ['build', 'drop', 'outro']: - return [] - pattern = [self._make_note(51, float(beat), 0.2, 82) for beat in range(4)] - if intensity >= 3: - pattern.extend([self._make_note(51, beat + 0.5, 0.15, 64) for beat in range(4)]) - return self._vary_drum_notes(self._repeat_pattern(pattern, total_length, 4.0), role, section, total_length) - - if role == 'crash': - if kind not in ['build', 'drop', 'break', 'outro']: - return [] - hit_positions = [0.0] - if kind == 'drop' and total_length >= 16.0: - hit_positions.append(8.0) - if kind == 'outro' and total_length >= 8.0: - hit_positions.append(total_length - 4.0) - notes = [ - self._make_note(49, position, min(1.5, max(0.25, total_length - position)), 82 if position == 0.0 else 70) - for position in hit_positions - if position < total_length - ] - return self._vary_drum_notes(notes, role, section, total_length) - - return [] - - def _bass_style_for_section(self, genre: str, style: str, role: str, section_kind: str) -> str: - style_text = f"{genre} {style}".lower() - if role == 'sub_bass': - return 'minimal' if section_kind != 'drop' else 'offbeat' - if 'acid' in style_text: - return 'acid' - if genre == 'house': - return 'offbeat' - if genre == 'drum-and-bass': - return 'rolling' - if section_kind in ['intro', 'outro', 'break']: - return 'minimal' - if genre == 'tech-house': - return 'offbeat' - return 'rolling' - - def _render_bass_scene(self, role: str, genre: str, style: str, key: str, - section: Dict[str, Any]) -> List[Dict[str, Any]]: - total_length = float(section['beats']) - kind 
= section['kind'] - scale_name = 'minor' if 'm' in key.lower() else 'major' - - if kind == 'break': - notes = self._build_pad_motion(key, scale_name, total_length, 2, 4.0) - else: - notes = self.create_bassline(key, self._bass_style_for_section(genre, style, role, kind), total_length) - - if role == 'sub_bass': - notes = self._transpose_notes(notes, -12) - notes = self._scale_note_lengths(notes, 1.35, minimum=0.2) - notes = self._vary_bass_notes(notes, role, key, section, total_length) - if kind in ['build', 'drop'] and total_length >= 8.0: - turnaround = self._build_turnaround_notes(key, scale_name, total_length, 2 if role == 'bass' else 1, 88 if role == 'bass' else 80) - notes = self._merge_section_notes(notes, turnaround, total_length) - return notes - - def _render_musical_scene(self, role: str, genre: str, key: str, scale: str, - section: Dict[str, Any]) -> List[Dict[str, Any]]: - total_length = float(section['beats']) - kind = section['kind'] - - if role == 'pad': - notes = self._build_pad_motion(key, scale, total_length, 4, 8.0 if kind == 'break' else 4.0) - return self._vary_melodic_notes(notes, role, key, scale, section, total_length) - - if role == 'chords': - progression_type = 'techno' if genre in ['techno', 'tech-house'] else ('trance' if genre == 'trance' else 'house') - notes = self.create_chord_progression(key, progression_type, total_length) - notes = self._scale_note_lengths(notes, 1.15, minimum=0.25) - return self._vary_melodic_notes(notes, role, key, scale, section, total_length) - - if role == 'stab': - notes = self.create_chord_progression(key, 'techno' if genre in ['techno', 'tech-house'] else 'house', total_length) - notes = self._scale_note_lengths(notes, 0.4, minimum=0.1) - shifted = [] - for note in notes: - start = float(note['start']) + (0.5 if int(float(note['start'])) % 2 == 0 else 0.0) - shifted.append(self._make_note(note['pitch'], start, note['duration'], min(118, note['velocity'] + 6))) - return self._vary_melodic_notes(shifted, 
role, key, scale, section, total_length) - - if role == 'pluck': - notes = self.create_melody(key, scale, total_length, genre) - notes = self._scale_note_lengths(notes, 0.55, minimum=0.12) - return self._vary_melodic_notes(notes, role, key, scale, section, total_length) - - notes = self.create_melody(key, scale, total_length, genre) - if role == 'arp': - notes = self._scale_note_lengths(notes, 0.45, minimum=0.1) - elif role == 'lead': - notes = self._transpose_notes(notes, 12) - elif role == 'counter': - sparse = [] - for note in notes: - start = float(note['start']) - if (start % 4.0) < 2.0: - continue - sparse.append(self._make_note(note['pitch'] - 12, start, max(0.2, float(note['duration']) * 0.8), max(50, int(note['velocity']) - 10))) - notes = sparse - notes = self._vary_melodic_notes(notes, role, key, scale, section, total_length) - if role in ['lead', 'arp', 'pluck', 'counter'] and kind in ['build', 'drop'] and total_length >= 8.0: - notes = self._merge_section_notes(notes, self._build_turnaround_notes(key, scale, total_length, 5, 84), total_length) - return notes - - def _render_fx_scene(self, role: str, key: str, section: Dict[str, Any]) -> List[Dict[str, Any]]: - total_length = float(section['beats']) - kind = section.get('kind', 'drop') - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, 5) - rng = self._section_rng(section, role, salt=19) - - if role == 'drone': - notes = [ - self._make_note(root_midi - 12, 0.0, min(total_length, 8.0 if kind == 'break' else total_length), 58), - self._make_note(root_midi - 5, max(0.0, total_length / 2.0), min(total_length / 2.0, 8.0), 52), - ] - if kind in ['build', 'drop'] and total_length >= 12.0: - notes.append(self._make_note(root_midi + 2, max(0.0, total_length - 6.0), 4.0, 48)) - return notes - - if role == 'reverse_fx': - if str(section.get('transition_fill', 'reverse')).lower() not in ['reverse', 'impact'] and kind not in ['break', 'build']: - return [] - notes = [] - 
for span, offset, velocity in ((4.0, 4.0, 70), (2.0, 2.0, 64), (1.0, 1.0, 58)): - if total_length >= offset: - start = max(0.0, total_length - offset) - notes.append(self._make_note(root_midi + 12, start, min(span, total_length - start), velocity)) - if kind == 'build' and total_length >= 16.0 and rng.random() > 0.35: - notes.append(self._make_note(root_midi + 7, max(0.0, total_length - 8.0), 1.5, 56)) - return notes - - if role == 'riser': - notes = [] - sweep_start = max(0.0, total_length - min(8.0, total_length)) - for offset, pitch, velocity in ((0.0, root_midi + 7, 64), (2.0, root_midi + 12, 70), (4.0, root_midi + 19, 74), (6.0, root_midi + 24, 78)): - start = sweep_start + offset - if start < total_length: - notes.append(self._make_note(pitch, start, min(2.0, total_length - start), velocity)) - if kind == 'build' and total_length >= 8.0: - notes.extend([ - self._make_note(root_midi + 12, max(0.0, total_length - 2.0), 0.5, 82), - self._make_note(root_midi + 19, max(0.0, total_length - 1.0), 0.45, 86), - ]) - return notes - - if role == 'impact': - if kind in ['intro', 'outro'] and str(section.get('transition_fill', 'impact')).lower() != 'impact': - return [] - notes = [self._make_note(root_midi + 7, 0.0, 0.5, 82)] - if total_length >= 8.0 and kind in ['build', 'drop']: - notes.append(self._make_note(root_midi + 12, total_length - 0.5, 0.45, 76)) - if kind == 'drop' and total_length >= 16.0 and rng.random() > 0.4: - notes.append(self._make_note(root_midi + 10, 8.0, 0.35, 72)) - return notes - - if role == 'atmos': - notes = [ - self._make_note(root_midi, 0.0, min(8.0, total_length), 54), - self._make_note(root_midi + 7, max(0.0, total_length / 2.0), min(8.0, total_length / 2.0), 50), - ] - if kind in ['intro', 'break', 'outro'] and total_length >= 12.0: - notes.append(self._make_note(root_midi + 12, max(0.0, total_length - 4.0), min(4.0, total_length), 46)) - return notes - - if role == 'vocal': - notes = [] - if kind == 'intro': - base_positions = [7.5, 15.5] 
- elif kind == 'build': - base_positions = [1.5, 3.5, 5.5, 7.5] - if total_length >= 16.0: - base_positions.extend([11.5, 13.5, 15.5]) - elif kind == 'drop': - base_positions = [1.5, 2.75, 5.5, 6.75] - if total_length >= 16.0: - base_positions.extend([9.5, 10.75, 13.5, 14.75]) - elif kind == 'break': - base_positions = [3.5, 11.5] - else: - base_positions = [1.5, 5.5] - - for index, pos in enumerate(base_positions): - if pos >= total_length: - continue - pitch = root_midi + (10 if kind == 'drop' and index % 2 else 3) - duration = 0.22 if kind == 'drop' else 0.3 - velocity = 80 if kind in ['build', 'drop'] else 72 - if rng.random() > 0.82: - pitch += 12 - notes.append(self._make_note(pitch, pos, duration, velocity)) - - if kind == 'build' and total_length >= 8.0: - notes.append(self._make_note(root_midi + 15, max(0.0, total_length - 0.75), 0.22, 84)) - return notes - - return [] - - def _build_pad_motion(self, key: str, scale_name: str, total_length: float, - octave: int = 4, sustain_beats: float = 4.0) -> List[Dict[str, Any]]: - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, octave) - scale_notes = self.get_scale_notes(root_midi, scale_name) - progression = random.choice(CHORD_PROGRESSIONS.get('techno' if 'm' in key.lower() else 'house', CHORD_PROGRESSIONS['techno'])) - notes = [] - bars = max(1, int(total_length / 4.0)) - - for bar in range(bars): - degree = progression[bar % len(progression)] - 1 - chord_root = scale_notes[degree % len(scale_notes)] - start = float(bar * 4.0) - duration = min(sustain_beats, total_length - start) - for interval in [0, 7, 12]: - notes.append(self._make_note(chord_root + interval, start, duration, 66)) - return notes - - def _generate_drum_tracks(self, genre: str, style: str) -> List[Dict]: - """Genera tracks de batería""" - tracks = [] - - # Kick siempre - tracks.append({ - 'name': 'Kick', - 'type': 'midi', - 'color': TRACK_COLORS['kick'], - 'clip': { - 'slot': 0, - 'length': 4.0, - 
'notes': self._create_kick_pattern(genre, style) - } - }) - - # Snare/Clap - tracks.append({ - 'name': 'Clap', - 'type': 'midi', - 'color': TRACK_COLORS['clap'], - 'clip': { - 'slot': 0, - 'length': 4.0, - 'notes': self._create_clap_pattern(genre, style) - } - }) - - # Hi-hats - tracks.append({ - 'name': 'HiHat', - 'type': 'midi', - 'color': TRACK_COLORS['hat'], - 'clip': { - 'slot': 0, - 'length': 4.0, - 'notes': self._create_hat_pattern(genre, style) - } - }) - - # Percusión extra para estilos más complejos - if style in ['latin', 'afro', 'groovy', 'complex']: - tracks.append({ - 'name': 'Percussion', - 'type': 'midi', - 'color': TRACK_COLORS['hat'], - 'clip': { - 'slot': 0, - 'length': 4.0, - 'notes': self._create_perc_pattern(genre, style) - } - }) - - return tracks - - def _generate_bass_track(self, key: str, scale: str, genre: str, style: str) -> Dict: - """Genera un track de bajo""" - notes = self.create_bassline(key, style, 16.0) - - return { - 'name': 'Bass', - 'type': 'midi', - 'color': TRACK_COLORS['bass'], - 'clip': { - 'slot': 0, - 'length': 16.0, - 'notes': notes - } - } - - def _generate_chord_track(self, key: str, scale: str, genre: str) -> Dict: - """Genera un track de acordes""" - notes = self.create_chord_progression(key, genre, 16.0) - - return { - 'name': 'Chords', - 'type': 'midi', - 'color': TRACK_COLORS['chords'], - 'clip': { - 'slot': 0, - 'length': 16.0, - 'notes': notes - } - } - - def _generate_lead_track(self, key: str, scale: str, genre: str) -> Dict: - """Genera un track lead/melódico""" - notes = self.create_melody(key, scale, 16.0, genre) - - return { - 'name': 'Lead', - 'type': 'midi', - 'color': TRACK_COLORS['synth'], - 'clip': { - 'slot': 0, - 'length': 16.0, - 'notes': notes - } - } - - def _generate_fx_track(self) -> Dict: - """Genera un track de FX/Atmósfera""" - return { - 'name': 'FX', - 'type': 'midi', - 'color': TRACK_COLORS['fx'], - 'clip': { - 'slot': 0, - 'length': 16.0, - 'notes': self._create_fx_notes() - } - } - - # 
========================================================================= - # PATRONES DE BATERÍA - # ========================================================================= - - def _create_kick_pattern(self, genre: str, style: str) -> List[Dict]: - """Crea patrón de kick""" - notes = [] - - if style == 'minimal': - # Kick en 1 y 2.5 - for bar in range(4): - notes.append({'pitch': 36, 'start': bar * 4.0, 'duration': 0.25, 'velocity': 120}) - notes.append({'pitch': 36, 'start': bar * 4.0 + 2.5, 'duration': 0.25, 'velocity': 110}) - elif style == 'four-on-the-floor' or genre in ['house', 'tech-house']: - # 4/4 clásico - for bar in range(4): - for beat in range(4): - notes.append({'pitch': 36, 'start': bar * 4.0 + beat, 'duration': 0.25, 'velocity': 127}) - else: # Default techno - for bar in range(4): - for beat in range(4): - vel = 127 if beat == 0 else 115 - notes.append({'pitch': 36, 'start': bar * 4.0 + beat, 'duration': 0.25, 'velocity': vel}) - - return notes - - def _create_clap_pattern(self, genre: str, style: str) -> List[Dict]: - """Crea patrón de clap/snare""" - notes = [] - - # Claps en 2 y 4 (beats 1 y 3 en 0-indexed) - for bar in range(4): - notes.append({'pitch': 40, 'start': bar * 4.0 + 1.0, 'duration': 0.25, 'velocity': 110}) - notes.append({'pitch': 40, 'start': bar * 4.0 + 3.0, 'duration': 0.25, 'velocity': 110}) - - # Snare adicional para DnB/Jungle - if genre == 'drum-and-bass': - for bar in range(4): - notes.append({'pitch': 38, 'start': bar * 4.0 + 1.75, 'duration': 0.1, 'velocity': 90}) - notes.append({'pitch': 38, 'start': bar * 4.0 + 2.25, 'duration': 0.1, 'velocity': 85}) - - return notes - - def _create_hat_pattern(self, genre: str, style: str) -> List[Dict]: - """Crea patrón de hi-hats""" - notes = [] - - if style in ['minimal', 'dub']: - # Off-bats simples - for bar in range(4): - for beat in range(4): - notes.append({'pitch': 42, 'start': bar * 4.0 + beat + 0.5, 'duration': 0.1, 'velocity': 90}) - elif style in ['tech-house-swing', 
'jackin', 'swing', 'latin-tech-house']: - # MJ-02: Tech house swing hats - 16% swing on 1/8 notes - swing_offset = 0.04 # ~16% swing at 16th note level - for bar in range(4): - for beat in range(4): - # Straight 8th note - time_straight = bar * 4.0 + beat * 1.0 - notes.append({'pitch': 42, 'start': time_straight, 'duration': 0.1, 'velocity': 95}) - # Swung off-beat 8th - time_off = bar * 4.0 + beat + 0.5 + swing_offset - notes.append({'pitch': 42, 'start': time_off, 'duration': 0.1, 'velocity': 75}) - # Open hat at end of every other bar - if bar % 2 == 1: - notes.append({'pitch': 46, 'start': bar * 4.0 + 3.5, 'duration': 0.4, 'velocity': 80}) - elif style == 'tech-house-jackin': - # MJ-02: Denser hat pattern for jackin tech house - for bar in range(4): - for beat in range(4): - for sub in range(2): - time = bar * 4.0 + beat + sub * 0.5 - vel = 100 if sub == 0 else 80 - notes.append({'pitch': 42, 'start': time, 'duration': 0.08, 'velocity': vel}) - # 16th note fill in last beat - notes.append({'pitch': 42, 'start': bar * 4.0 + 3.75, 'duration': 0.05, 'velocity': 65}) - notes.append({'pitch': 46, 'start': bar * 4.0 + 2.5, 'duration': 0.5, 'velocity': 85}) - elif style == 'tech-house-minimal': - # MJ-02: Sparse, subtle hats for minimal tech house - for bar in range(4): - notes.append({'pitch': 42, 'start': bar * 4.0 + 0.5, 'duration': 0.1, 'velocity': 80}) - notes.append({'pitch': 42, 'start': bar * 4.0 + 2.5, 'duration': 0.1, 'velocity': 70}) - if bar % 2 == 1: - notes.append({'pitch': 46, 'start': bar * 4.0 + 3.5, 'duration': 0.3, 'velocity': 60}) - else: - # 8vos con variación - for bar in range(4): - for beat in range(4): - for sub in range(2): - time = bar * 4.0 + beat + sub * 0.5 - vel = 90 if sub == 0 else 70 - notes.append({'pitch': 42, 'start': time, 'duration': 0.1, 'velocity': vel}) - - # Open hats ocasionales - if style not in ['minimal']: - for bar in range(4): - notes.append({'pitch': 46, 'start': bar * 4.0 + 3.5, 'duration': 0.5, 'velocity': 80}) - - 
return notes - - def _create_perc_pattern(self, genre: str, style: str) -> List[Dict]: - """Crea patrón de percusión extra""" - notes = [] - - if style in ['latin-tech-house', 'latin', 'latin-industrial']: - # MJ-05: Latin tech house percussion - congas/bongos - for bar in range(4): - # Conga pattern (high conga = pitch 50, low conga = pitch 43) - # Tumbao pattern - notes.append({'pitch': 50, 'start': bar * 4.0 + 0.5, 'duration': 0.15, 'velocity': 85}) - notes.append({'pitch': 50, 'start': bar * 4.0 + 2.5, 'duration': 0.15, 'velocity': 90}) - notes.append({'pitch': 43, 'start': bar * 4.0 + 1.0, 'duration': 0.2, 'velocity': 75}) - notes.append({'pitch': 43, 'start': bar * 4.0 + 3.0, 'duration': 0.2, 'velocity': 80}) - # Bongo accent - if bar % 2 == 0: - notes.append({'pitch': 48, 'start': bar * 4.0 + 1.5, 'duration': 0.1, 'velocity': 70}) - notes.append({'pitch': 48, 'start': bar * 4.0 + 3.5, 'duration': 0.1, 'velocity': 65}) - # Shaker layer - for i in range(8): - time = bar * 4.0 + i * 0.5 - if i % 2 == 1: - notes.append({'pitch': 53, 'start': time, 'duration': 0.05, 'velocity': 50 + random.randint(-5, 5)}) - else: - for bar in range(4): - # Shakers/congas en 16vos - for i in range(16): - time = bar * 4.0 + i * 0.25 - if i % 4 != 0: # Skip downbeats - vel = 60 + random.randint(-10, 10) - notes.append({'pitch': 37, 'start': time, 'duration': 0.1, 'velocity': vel}) - - return notes - - def _create_fx_notes(self) -> List[Dict]: - """Crea notas para FX/atmósfera""" - notes = [] - - # Swells y risers - for bar in [0, 2]: - # Nota larga ascendente - notes.append({'pitch': 84, 'start': bar * 4.0 + 3.0, 'duration': 1.0, 'velocity': 70}) - - return notes - - # ========================================================================= - # CREACIÓN DE PATRONES PARA MCP - # ========================================================================= - - def create_drum_pattern(self, style: str, pattern_type: str, length: float) -> List[Dict]: - """Crea un patrón de batería 
completo para usar con MCP""" - notes = [] - bars = int(length / 4.0) - - if pattern_type == 'kick-only': - for bar in range(bars): - for beat in range(4): - notes.append({'pitch': 36, 'start': bar * 4.0 + beat, 'duration': 0.25, 'velocity': 127}) - - elif pattern_type == 'hats-only': - for bar in range(bars): - for beat in range(4): - notes.append({'pitch': 42, 'start': bar * 4.0 + beat + 0.5, 'duration': 0.1, 'velocity': 90}) - - elif pattern_type == 'minimal': - for bar in range(bars): - notes.append({'pitch': 36, 'start': bar * 4.0, 'duration': 0.25, 'velocity': 127}) - notes.append({'pitch': 40, 'start': bar * 4.0 + 2.0, 'duration': 0.25, 'velocity': 110}) - notes.append({'pitch': 42, 'start': bar * 4.0 + 2.5, 'duration': 0.1, 'velocity': 80}) - - elif style == 'tech-house-swing': - # MJ-02: Tech house with swing - kick 1&3, ghost kicks on 2.5&3.5, swing hats, ghost clap - for bar in range(bars): - # Main kicks on 1 and 3 - notes.append({'pitch': 36, 'start': bar * 4.0, 'duration': 0.25, 'velocity': 127}) - notes.append({'pitch': 36, 'start': bar * 4.0 + 2.0, 'duration': 0.25, 'velocity': 127}) - # Ghost kicks on 2.5 and 3.5 - notes.append({'pitch': 36, 'start': bar * 4.0 + 1.5, 'duration': 0.15, 'velocity': 95}) - notes.append({'pitch': 36, 'start': bar * 4.0 + 3.5, 'duration': 0.15, 'velocity': 90}) - # Clap on 2 and 4 with ghost note - notes.append({'pitch': 40, 'start': bar * 4.0 + 1.0, 'duration': 0.2, 'velocity': 110}) - notes.append({'pitch': 40, 'start': bar * 4.0 + 1.85, 'duration': 0.08, 'velocity': 60}) - notes.append({'pitch': 40, 'start': bar * 4.0 + 3.0, 'duration': 0.2, 'velocity': 110}) - # Swing hats (16% swing) - swing = 0.04 - for beat in range(4): - notes.append({'pitch': 42, 'start': bar * 4.0 + beat, 'duration': 0.1, 'velocity': 90}) - notes.append({'pitch': 42, 'start': bar * 4.0 + beat + 0.5 + swing, 'duration': 0.1, 'velocity': 70}) - # Open hat - notes.append({'pitch': 46, 'start': bar * 4.0 + 3.5, 'duration': 0.4, 'velocity': 75}) - 
- elif style == 'tech-house-jackin': - # MJ-02: Jackin tech house - energetic, dense hats, harder clap - for bar in range(bars): - for beat in range(4): - vel = 127 if beat in [0, 2] else 105 - notes.append({'pitch': 36, 'start': bar * 4.0 + beat, 'duration': 0.25, 'velocity': vel}) - # Strong clap on 2 and 4 - notes.append({'pitch': 40, 'start': bar * 4.0 + 1.0, 'duration': 0.2, 'velocity': 120}) - notes.append({'pitch': 40, 'start': bar * 4.0 + 3.0, 'duration': 0.2, 'velocity': 120}) - # Dense 16th hats - for i in range(16): - time = bar * 4.0 + i * 0.25 - vel = 100 if i % 4 == 0 else 75 if i % 2 == 0 else 55 - notes.append({'pitch': 42, 'start': time, 'duration': 0.08, 'velocity': vel}) - # Open hat every bar - notes.append({'pitch': 46, 'start': bar * 4.0 + 2.5, 'duration': 0.5, 'velocity': 85}) - - elif style == 'tech-house-minimal': - # MJ-02: Minimal tech house - sparse kick, subtle perc - for bar in range(bars): - notes.append({'pitch': 36, 'start': bar * 4.0, 'duration': 0.25, 'velocity': 120}) - if bar % 2 == 0: - notes.append({'pitch': 36, 'start': bar * 4.0 + 2.0, 'duration': 0.25, 'velocity': 105}) - # Very sparse hats - notes.append({'pitch': 42, 'start': bar * 4.0 + 1.5, 'duration': 0.08, 'velocity': 65}) - notes.append({'pitch': 42, 'start': bar * 4.0 + 3.5, 'duration': 0.08, 'velocity': 60}) - # Subtle clap on 2 and 4 every other bar - if bar % 2 == 1: - notes.append({'pitch': 40, 'start': bar * 4.0 + 1.0, 'duration': 0.15, 'velocity': 80}) - notes.append({'pitch': 40, 'start': bar * 4.0 + 3.0, 'duration': 0.15, 'velocity': 80}) - - else: # full - notes.extend(self._create_kick_pattern(style, 'standard')) - notes.extend(self._create_clap_pattern(style, 'standard')) - notes.extend(self._create_hat_pattern(style, 'standard')) - - return notes - - return notes - - def create_bassline(self, key: str, style: str, length: float) -> List[Dict]: - """Crea una línea de bajo musical""" - notes = [] - - # Parsear key - root_note = key[:-1] if len(key) > 1 
else key - is_minor = 'm' in key.lower() - scale_name = 'minor' if is_minor else 'major' - - root_midi = self.note_name_to_midi(root_note, 2) # Octava 2 para bajo - scale_notes = self.get_scale_notes(root_midi, scale_name) - - bars = int(length / 4.0) - - if style == 'tech-house': - # MJ-03: Tech house bass - syncopated, groovy with velocity variations - for bar in range(bars): - # Beat 1: root on downbeat - notes.append({'pitch': root_midi, 'start': bar * 4.0, 'duration': 0.3, 'velocity': 120}) - # Off-beat after beat 1: fifth or octave - fifth = scale_notes[4] if len(scale_notes) > 4 else root_midi + 7 - notes.append({'pitch': fifth, 'start': bar * 4.0 + 0.75, 'duration': 0.2, 'velocity': 85}) - # Beat 2: syncopated - skip beat 2, play on 2.5 - notes.append({'pitch': root_midi, 'start': bar * 4.0 + 2.25, 'duration': 0.25, 'velocity': 95}) - # Beat 3: root again - notes.append({'pitch': root_midi, 'start': bar * 4.0 + 3.0, 'duration': 0.3, 'velocity': 110}) - # Off-beat ghost - minor_third = scale_notes[2] if len(scale_notes) > 2 else root_midi + 3 - notes.append({'pitch': minor_third, 'start': bar * 4.0 + 3.5, 'duration': 0.15, 'velocity': 70}) - # Bar variation: every 2nd bar add extra syncopation - if bar % 2 == 1: - notes.append({'pitch': root_midi, 'start': bar * 4.0 + 1.5, 'duration': 0.15, 'velocity': 80}) - - elif style == 'rolling': - # Bass en 16vos - for bar in range(bars): - for beat in range(4): - for sub in range(4): - time = bar * 4.0 + beat + sub * 0.25 - if sub == 0: - pitch = root_midi - vel = 120 - elif sub == 2: - pitch = scale_notes[4] if len(scale_notes) > 4 else root_midi + 7 - vel = 100 - else: - pitch = root_midi - vel = 80 if sub % 2 == 0 else 70 - - notes.append({'pitch': pitch, 'start': time, 'duration': 0.2, 'velocity': vel}) - - elif style == 'minimal': - # Solo en beats 1 y 3 - for bar in range(bars): - for beat in [0, 2]: - time = bar * 4.0 + beat - notes.append({'pitch': root_midi, 'start': time, 'duration': 1.5, 'velocity': 110}) 
- - elif style == 'offbeat': - # Notas en off-beats (house típico) - for bar in range(bars): - for beat in range(4): - time = bar * 4.0 + beat + 0.5 - pitch = root_midi if beat % 2 == 0 else scale_notes[3] - notes.append({'pitch': pitch, 'start': time, 'duration': 0.4, 'velocity': 100}) - - elif style == 'acid': - # Estilo TB-303 con slides - for bar in range(bars): - for i in range(8): - time = bar * 4.0 + i * 0.5 - pitch = root_midi + random.choice([0, 3, 5, 7, 10]) - vel = 90 + random.randint(-20, 20) - notes.append({'pitch': pitch, 'start': time, 'duration': 0.4, 'velocity': min(127, max(60, vel))}) - - else: # walking - for bar in range(bars): - for beat in range(4): - time = bar * 4.0 + beat - if beat == 0: - pitch = root_midi - elif beat == 1: - pitch = scale_notes[2] if len(scale_notes) > 2 else root_midi + 3 - elif beat == 2: - pitch = scale_notes[3] if len(scale_notes) > 3 else root_midi + 5 - else: - pitch = scale_notes[4] if len(scale_notes) > 4 else root_midi + 7 - - notes.append({'pitch': pitch, 'start': time, 'duration': 0.9, 'velocity': 100}) - - return notes - - def create_chord_progression(self, key: str, progression_type: str, length: float) -> List[Dict]: - """Crea una progresión de acordes""" - notes = [] - - # Parsear key - root_note = key[:-1] if len(key) > 1 else key - is_minor = 'm' in key.lower() - scale_name = 'minor' if is_minor else 'major' - - root_midi = self.note_name_to_midi(root_note, 4) # Octava 4 para acordes - scale_notes = self.get_scale_notes(root_midi, scale_name) - - # Seleccionar progresión - progressions = CHORD_PROGRESSIONS.get(progression_type, CHORD_PROGRESSIONS['techno']) - progression = random.choice(progressions) - - bars = int(length / 4.0) - beats_per_bar = 4 - - for bar in range(bars): - degree = progression[bar % len(progression)] - 1 - - if degree < len(scale_notes): - chord_root = scale_notes[degree] - else: - chord_root = root_midi - - # Construir acorde (triada) - third = 3 if 'minor' in scale_name else 4 - 
chord_tones = [chord_root, chord_root + third, chord_root + 7] - - # Stab chords - cortos y percusivos - if progression_type == 'techno': - for pitch in chord_tones: - notes.append({ - 'pitch': pitch, - 'start': bar * beats_per_bar, - 'duration': 0.25, - 'velocity': 90 - }) - elif progression_type == 'house': - for beat in [0.5, 2.5]: - for pitch in chord_tones: - notes.append({ - 'pitch': pitch, - 'start': bar * beats_per_bar + beat, - 'duration': 0.5, - 'velocity': 75 - }) - else: - # Default: acordes en beats 1 y 3 - for beat in [0, 2]: - for pitch in chord_tones: - notes.append({ - 'pitch': pitch, - 'start': bar * beats_per_bar + beat, - 'duration': 1.0, - 'velocity': 85 - }) - - return notes - - def create_melody(self, key: str, scale: str, length: float, genre: str) -> List[Dict]: - """Crea una melodía/lead""" - notes = [] - - root_note = key[:-1] if len(key) > 1 else key - root_midi = self.note_name_to_midi(root_note, 5) # Octava 5 para lead - scale_notes = self.get_scale_notes(root_midi, scale) - - bars = max(1, int(length / 4.0)) - motif_pool = [ - ([0, 2, 4, 2, 5, 4], [0.0, 0.5, 1.5, 2.0, 2.75, 3.25]), - ([0, 3, 4, 6, 4], [0.0, 0.75, 1.5, 2.5, 3.25]), - ([0, 2, 3, 5, 3, 2], [0.0, 0.5, 1.0, 2.0, 2.5, 3.5]), - ] - motif_steps, motif_times = random.choice(motif_pool) - - for bar in range(bars): - bar_offset = bar * 4.0 - phrase_shift = 0 if bar % 4 in [0, 1] else random.choice([0, 1, -1, 2]) - invert_tail = (bar % 4 == 3) - for index, step in enumerate(motif_steps): - start = bar_offset + motif_times[index % len(motif_times)] - if start >= length: - continue - if invert_tail and index >= max(1, len(motif_steps) - 2): - start += 0.25 - if random.random() < 0.18 and index not in [0, len(motif_steps) - 1]: - continue - - scale_index = (step + phrase_shift) % len(scale_notes) - pitch = scale_notes[scale_index] - if genre in ['trance', 'progressive'] and index == len(motif_steps) - 1: - pitch += 12 - elif genre in ['techno', 'tech-house'] and index % 3 == 2: - 
pitch -= 12 - - duration = 0.22 if start % 1.0 not in [0.0, 0.5] else 0.35 - velocity = 78 + ((index + bar) % 3) * 8 + random.randint(-6, 8) - notes.append({ - 'pitch': pitch, - 'start': start, - 'duration': duration, - 'velocity': max(60, min(123, velocity)) - }) - - return notes - - # ========================================================================= - # Human Feel Integration - # ========================================================================= - - def apply_human_feel(self, config: Dict[str, Any], intensity: float = 0.6) -> Dict[str, Any]: - """ - Aplica herramientas de human feel a una configuración generada. - - Args: - config: Configuración del generador - intensity: Intensidad (0.3=sutil, 0.6=groove, 1.0=vivo) - - Returns: - Configuración con human feel aplicado - - Configuración por intensidad: - - 0.3 (sutil): Fades rápidos en intro/outro, LFO ±1.5dB, sparse fills, 8% swing - - 0.6 (groove): Fades en tutti, LFO ±3dB con S-curve, medium fills, 14% swing - - 1.0 (vivo): Fades agresivos con pump, LFO ±5dB, heavy fills, 18% swing - """ - import random - - # Configuración por intensidad - intensity_config = { - 0.3: { - 'fade_in_bars': 2.0, - 'fade_out_bars': 2.0, - 'lfo_depth': 1.5, - 'lfo_rate': 0.25, - 'fill_density': 'sparse', - 'swing_percent': 8.0, - 'sidechain_style': 'subtle', - }, - 0.6: { - 'fade_in_bars': 4.0, - 'fade_out_bars': 4.0, - 'lfo_depth': 3.0, - 'lfo_rate': 0.5, - 'fill_density': 'medium', - 'swing_percent': 14.0, - 'sidechain_style': 'jackin', - }, - 1.0: { - 'fade_in_bars': 6.0, - 'fade_out_bars': 6.0, - 'lfo_depth': 5.0, - 'lfo_rate': 0.75, - 'fill_density': 'heavy', - 'swing_percent': 18.0, - 'sidechain_style': 'jackin', - }, - } - - cfg = intensity_config.get(intensity, intensity_config[0.6]) - - # Aplicar swing a patrones MIDI - for track in config.get('tracks', []): - if track.get('type') == 'midi': - # Aplicar swing al pattern - pattern = track.get('pattern', []) - if pattern: - track['swing_percent'] = 
cfg['swing_percent'] - - # Agregar automation de volumen por sección - for track in config.get('tracks', []): - role = track.get('role', '') - if role in ['kick', 'bass', 'top_loop', 'synth_loop']: - # Automatización de volumen por sección - track['volume_automation'] = { - 'curve_type': 's_curve' if intensity >= 0.6 else 'linear', - 'section_map': { - 'intro': 0.5 + (0.35 * intensity), - 'build': 0.7 + (0.15 * intensity), - 'drop': 0.85 + (0.1 * intensity), - 'break': 0.6 + (0.2 * intensity), - 'outro': 0.4 + (0.2 * intensity), - } - } - - # Agregar sidechain pump para buses - if 'buses' in config: - for bus_name, bus in config['buses'].items(): - if bus_name in ['drums', 'bass', 'music']: - bus['sidechain_pump'] = { - 'intensity': intensity, - 'style': cfg['sidechain_style'], - 'attack': 0.001 * (1.0 / intensity), - 'release': 0.1 * intensity, - } - - # Agregar fills automáticos - for track in config.get('tracks', []): - if track.get('type') == 'midi' and track.get('role') in ['kick', 'snare', 'hat']: - track['pattern_fills'] = { - 'density': cfg['fill_density'], - 'section': 'all', - 'fill_probability': 0.125 * (1 + intensity), # 1 cada 8-4 bars - } - - # Agregar variación de loops por sección - for track in config.get('tracks', []): - if track.get('type') == 'audio' and track.get('role'): - track['loop_variation'] = { - 'intro': 'filtered' if intensity >= 0.3 else 'standard', - 'build': 'building' if intensity >= 0.6 else 'standard', - 'drop': 'full' if intensity >= 0.6 else 'standard', - 'break': 'sparse' if intensity >= 0.3 else 'standard', - 'outro': 'fading' if intensity >= 0.6 else 'standard', - } - - return config - - def generate_with_human_feel(self, genre: str, style: str, bpm: float, key: str, - structure: str, intensity: float = 0.6) -> Dict[str, Any]: - """ - Genera una configuración completa con human feel aplicado. 
- - Args: - genre: Género musical - style: Sub-estilo - bpm: BPM - key: Tonalidad - structure: Estructura ('standard', 'club', 'tech-house-dj') - intensity: Intensidad de human feel (0.3-1.0) - - Returns: - Configuración completa con human feel - """ - # Generar configuración base - config = self.generate_config(genre, style, bpm, key, structure) - - # Aplicar human feel - config = self.apply_human_feel(config, intensity) - - return config - diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/template_analyzer.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/template_analyzer.py deleted file mode 100644 index b1823d2..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/template_analyzer.py +++ /dev/null @@ -1,177 +0,0 @@ -from __future__ import annotations - -import argparse -import gzip -import json -from collections import Counter -from pathlib import Path -import xml.etree.ElementTree as ET - - -def _node_name(node: ET.Element | None) -> str: - if node is None: - return "" - for tag in ("EffectiveName", "UserName", "Name"): - child = node.find(tag) - if child is not None: - value = child.attrib.get("Value", "") - if value: - return value - return node.attrib.get("Value", "") - - -def _device_name(device: ET.Element) -> str: - if device.tag == "PluginDevice": - info = device.find("PluginDesc/VstPluginInfo") - if info is None: - info = device.find("PluginDesc/AuPluginInfo") - if info is not None: - plug = info.find("PlugName") - if plug is not None and plug.attrib.get("Value"): - return plug.attrib["Value"] - return device.tag - - -def _session_clip_count(track: ET.Element) -> int: - count = 0 - for slot in track.findall("./DeviceChain/MainSequencer/ClipSlotList/ClipSlot"): - if slot.find("Value/MidiClip") is not None or slot.find("Value/AudioClip") is not None: - count += 1 - return count - - -def _arrangement_clip_count(track: ET.Element) -> int: - return len(track.findall(".//MainSequencer//MidiClip")) + len( - 
track.findall(".//MainSequencer//AudioClip") - ) - - -def _tempo_value(live_set: ET.Element) -> float | None: - node = live_set.find(".//Tempo/Manual") - if node is None: - return None - try: - return float(node.attrib.get("Value", "0")) - except ValueError: - return None - - -def _locator_summary(live_set: ET.Element) -> list[dict[str, float | str | None]]: - locators: list[tuple[float, str]] = [] - for locator in live_set.findall(".//Locators/Locators/Locator"): - try: - time = float(locator.find("Time").attrib.get("Value", "0")) - except (AttributeError, ValueError): - time = 0.0 - name = _node_name(locator.find("Name")) - locators.append((time, name)) - locators.sort(key=lambda item: item[0]) - summary: list[dict[str, float | str | None]] = [] - for index, (time, name) in enumerate(locators): - next_time = locators[index + 1][0] if index + 1 < len(locators) else None - summary.append( - { - "time_beats": time, - "name": name, - "section_length_beats": None if next_time is None else next_time - time, - } - ) - return summary - - -def _arrangement_length_beats(root: ET.Element) -> float: - max_end = 0.0 - for clip in root.findall(".//MidiClip") + root.findall(".//AudioClip"): - current_end = clip.find("CurrentEnd") - start = clip.attrib.get("Time") - if current_end is None or start is None: - continue - try: - end = float(start) + float(current_end.attrib.get("Value", "0")) - except ValueError: - continue - max_end = max(max_end, end) - return max_end - - -def analyze_set(als_path: Path) -> dict: - with gzip.open(als_path, "rb") as handle: - root = ET.parse(handle).getroot() - live_set = root.find("LiveSet") - if live_set is None: - raise ValueError(f"Invalid ALS file: {als_path}") - - tracks = list(live_set.find("Tracks") or []) - track_summaries = [] - device_counter: Counter[str] = Counter() - - for track in tracks: - devices = track.findall("./DeviceChain/DeviceChain/Devices/*") - device_names = [_device_name(device) for device in devices] - 
device_counter.update(device_names) - track_summaries.append( - { - "type": track.tag, - "name": _node_name(track.find("Name")), - "group_id": track.find("TrackGroupId").attrib.get("Value", "") - if track.find("TrackGroupId") is not None - else "", - "session_clip_count": _session_clip_count(track), - "arrangement_clip_count": _arrangement_clip_count(track), - "devices": device_names, - } - ) - - automation_events = 0 - for automation in root.findall(".//ArrangerAutomation"): - automation_events += len(automation.findall(".//FloatEvent")) - automation_events += len(automation.findall(".//EnumEvent")) - automation_events += len(automation.findall(".//BoolEvent")) - - return { - "file": str(als_path), - "tempo": _tempo_value(live_set), - "track_type_counts": dict(Counter(track.tag for track in tracks)), - "scene_count": len(live_set.findall("./SceneNames/Scene")), - "locators": _locator_summary(live_set), - "arrangement_length_beats": _arrangement_length_beats(root), - "automation_event_count": automation_events, - "top_devices": dict(device_counter.most_common(16)), - "tracks": track_summaries, - } - - -def main() -> None: - parser = argparse.ArgumentParser(description="Analyze Ableton .als templates.") - parser.add_argument("path", nargs="?", default=".", help="Folder containing .als files") - parser.add_argument("--json", action="store_true", help="Emit JSON") - args = parser.parse_args() - - base = Path(args.path).resolve() - results = [analyze_set(path) for path in sorted(base.rglob("*.als"))] - - if args.json: - print(json.dumps(results, indent=2)) - return - - for result in results: - print(f"=== {Path(result['file']).name} ===") - print(f"tempo: {result['tempo']}") - print(f"tracks: {result['track_type_counts']}") - print(f"scenes: {result['scene_count']}") - print(f"arrangement_length_beats: {result['arrangement_length_beats']}") - print(f"automation_event_count: {result['automation_event_count']}") - print("locators:") - for locator in result["locators"]: - 
print( - f" - {locator['time_beats']:>6} {locator['name']}" - f" len={locator['section_length_beats']}" - ) - print("top_devices:") - for name, count in result["top_devices"].items(): - print(f" - {name}: {count}") - print() - - -if __name__ == "__main__": - main() diff --git a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/vector_manager.py b/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/vector_manager.py deleted file mode 100644 index 99572c2..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/MCP_Server/vector_manager.py +++ /dev/null @@ -1,452 +0,0 @@ -import os -import json -import logging -import argparse -from pathlib import Path -from typing import List, Dict, Tuple, Optional -from multiprocessing import Pool, cpu_count -import functools - -try: - from sentence_transformers import SentenceTransformer - from sklearn.metrics.pairwise import cosine_similarity - import numpy as np - HAS_ML = True -except ImportError: - HAS_ML = False - -# Import AudioAnalyzer for spectral analysis -try: - from audio_analyzer import AudioAnalyzer, analyze_sample - HAS_AUDIO_ANALYZER = True -except ImportError: - HAS_AUDIO_ANALYZER = False - -logger = logging.getLogger("VectorManager") -logging.basicConfig(level=logging.INFO) - - -# Global analyzer for multiprocessing workers (initialized once per worker) -_worker_analyzer = None - -def _init_worker(): - """Initialize the audio analyzer for each worker process.""" - global _worker_analyzer - if HAS_AUDIO_ANALYZER: - try: - _worker_analyzer = AudioAnalyzer(backend="auto") - except Exception: - _worker_analyzer = None - -def _process_single_file(args): - """ - Process a single audio file and return its metadata. - Used for multiprocessing parallel execution. 
- """ - f, library_dir, skip_audio_analysis = args - f = Path(f) - - import soundfile as sf - - # Clean up the name for better semantic understanding - name = f.stem - name_lower = name.lower() - clean_name = name.replace('_', ' ').replace('-', ' ').lower() - - # Keywords that strongly suggest a full song/mix - full_song_keywords = {'original mix', 'extended mix', 'full mix', 'edit', 'master', '320kbps', 'remix'} - - # Extract duration - duration = 0.0 - try: - info = sf.info(str(f)) - duration = info.duration - except Exception: - duration = -1.0 - - # Detect if it's likely a full song based on name and duration - is_full_song = False - if duration > 45.0: - is_full_song = True - elif any(kw in name_lower for kw in full_song_keywords) and duration > 30.0: - is_full_song = True - - # Spectral analysis with AudioAnalyzer - key = None - key_confidence = 0.0 - spectral_centroid = None - is_harmonic = None - - global _worker_analyzer - if not skip_audio_analysis and _worker_analyzer is not None: - try: - features = _worker_analyzer.analyze(str(f)) - key = features.key - key_confidence = features.key_confidence - spectral_centroid = features.spectral_centroid - is_harmonic = features.is_harmonic - except Exception: - pass - - # Use relative path as part of the context - try: - rel_path = f.relative_to(library_dir) - parts = rel_path.parts[:-1] - path_context = " ".join(parts).lower() - except ValueError: - path_context = "" - - description = f"{clean_name} {path_context}" - - metadata = { - 'path': str(f), - 'name': name, - 'description': description, - 'duration': duration, - 'is_full_song': is_full_song, - 'key': key, - 'key_confidence': key_confidence, - 'spectral_centroid': spectral_centroid, - 'is_harmonic': is_harmonic - } - - return metadata, description - -class VectorManager: - def __init__(self, library_dir: str, skip_audio_analysis: bool = False): - self.library_dir = Path(library_dir) - self.index_file = self.library_dir / ".sample_embeddings.json" - 
self.skip_audio_analysis = skip_audio_analysis - - self.model = None - self.embeddings = [] - self.metadata = [] - - # Audio analyzer instance for spectral analysis - self._audio_analyzer: Optional[AudioAnalyzer] = None - if HAS_AUDIO_ANALYZER and not skip_audio_analysis: - try: - self._audio_analyzer = AudioAnalyzer(backend="auto") - logger.info("AudioAnalyzer initialized for spectral analysis") - except Exception as e: - logger.warning(f"Failed to initialize AudioAnalyzer: {e}") - self._audio_analyzer = None - - if HAS_ML: - try: - # Load a very lightweight model for fast embeddings - logger.info("Loading sentence-transformers model (all-MiniLM-L6-v2)...") - self.model = SentenceTransformer('all-MiniLM-L6-v2') - except Exception as e: - logger.error(f"Failed to load embedding model: {e}") - - self._load_or_build_index() - - def _get_library_fingerprint(self) -> Dict: - """Compute a fingerprint of the library directory for change detection (BF-02/MJ-07).""" - extensions = {'.wav', '.aif', '.aiff', '.mp3'} - file_count = 0 - latest_mtime = 0.0 - try: - for ext in extensions: - for f in self.library_dir.rglob('*' + ext): - file_count += 1 - try: - mtime = f.stat().st_mtime - if mtime > latest_mtime: - latest_mtime = mtime - except OSError: - pass - for f in self.library_dir.rglob('*' + ext.upper()): - file_count += 1 - try: - mtime = f.stat().st_mtime - if mtime > latest_mtime: - latest_mtime = mtime - except OSError: - pass - except Exception: - pass - return {'file_count': file_count, 'latest_mtime': latest_mtime} - - def _load_or_build_index(self): - if self.index_file.exists(): - logger.info("Loading existing vector index...") - try: - with open(self.index_file, 'r', encoding='utf-8') as f: - data = json.load(f) - self.metadata = data.get('metadata', []) - - # BF-02/MJ-07: Check library fingerprint for auto-rebuild - stored_fp = data.get('library_fingerprint', {}) - current_fp = self._get_library_fingerprint() - stored_count = stored_fp.get('file_count', 0) - 
current_count = current_fp.get('file_count', 0) - if current_count != stored_count and stored_count > 0: - logger.info(f"Library changed ({stored_count} -> {current_count} files). Rebuilding index...") - self._build_index() - return - - if HAS_ML and 'embeddings' in data: - self.embeddings = np.array(data['embeddings']) - else: - logger.warning("No embeddings found in loaded index.") - except Exception as e: - logger.error(f"Failed to load index: {e}") - self._build_index() - else: - self._build_index() - - def _build_index(self): - logger.info(f"Scanning library {self.library_dir} for new embeddings...") - extensions = {'.wav', '.aif', '.aiff', '.mp3'} - - files_to_process = [] - for ext in extensions: - files_to_process.extend(self.library_dir.rglob('*' + ext)) - files_to_process.extend(self.library_dir.rglob('*' + ext.upper())) - - if not files_to_process: - logger.warning(f"No audio files found in {self.library_dir} to embed.") - return - - # Get unique files - unique_files = list(set(str(f) for f in files_to_process)) - total_files = len(unique_files) - logger.info(f"Found {total_files} audio files to process") - - # Determine number of workers (use 50% of available CPUs) - num_workers = max(1, cpu_count() // 2) - logger.info(f"Using {num_workers} CPU cores for parallel processing (50% capacity)") - - # Prepare arguments for parallel processing - args_list = [(f, str(self.library_dir), self.skip_audio_analysis) for f in unique_files] - - # Process files in parallel using multiprocessing - texts_to_embed = [] - self.metadata = [] - - if not self.skip_audio_analysis and HAS_AUDIO_ANALYZER: - # Use multiprocessing with audio analysis - logger.info("Starting parallel audio analysis...") - with Pool(processes=num_workers, initializer=_init_worker) as pool: - results = pool.map(_process_single_file, args_list) - - for metadata, description in results: - self.metadata.append(metadata) - texts_to_embed.append(description) - else: - # Fallback to sequential processing 
(no audio analysis) - logger.info("Processing files sequentially (audio analysis disabled)...") - import soundfile as sf - full_song_keywords = {'original mix', 'extended mix', 'full mix', 'edit', 'master', '320kbps', 'remix'} - - for i, f in enumerate(unique_files): - f = Path(f) - if (i + 1) % max(1, total_files // 20) == 0 or (i + 1) == total_files: - logger.info(f"Processing files: {i+1}/{total_files} ({(i+1)/total_files*100:.1f}%)") - - name = f.stem - clean_name = name.replace('_', ' ').replace('-', ' ').lower() - - duration = 0.0 - try: - info = sf.info(str(f)) - duration = info.duration - except Exception: - duration = -1.0 - - is_full_song = duration > 45.0 - - try: - rel_path = f.relative_to(self.library_dir) - path_context = " ".join(rel_path.parts[:-1]).lower() - except ValueError: - path_context = "" - - description = f"{clean_name} {path_context}" - texts_to_embed.append(description) - - self.metadata.append({ - 'path': str(f), - 'name': name, - 'description': description, - 'duration': duration, - 'is_full_song': is_full_song, - 'key': None, - 'key_confidence': 0.0, - 'spectral_centroid': None, - 'is_harmonic': None - }) - - if HAS_ML and self.model: - logger.info(f"Generating vectors for {len(texts_to_embed)} samples. This might take a moment...") - embeddings = self.model.encode(texts_to_embed) - self.embeddings = embeddings - - # BF-02: Save fingerprint alongside embeddings for auto-rebuild detection - fingerprint = self._get_library_fingerprint() - - # Save the vectors - with open(self.index_file, 'w', encoding='utf-8') as f: - json.dump({ - 'metadata': self.metadata, - 'embeddings': embeddings.tolist(), - 'library_fingerprint': fingerprint - }, f) - logger.info(f"Saved {len(self.metadata)} embeddings to {self.index_file}.") - else: - logger.error("ML libraries not installed. 
Run 'pip install sentence-transformers scikit-learn numpy'") - - # MJ-06: Genre keyword expansion for richer semantic search - GENRE_SEARCH_TERMS = { - 'tech-house': ['groovy', 'driving', 'punchy', 'jackin', 'swinging', 'hypnotic', 'bouncy'], - 'house': ['deep', 'soulful', 'warm', 'classic', 'funky'], - 'techno': ['industrial', 'dark', 'raw', 'hypnotic', 'peak-time', 'acid'], - 'trance': ['uplifting', 'ethereal', 'driving', 'euphoric'], - 'deep-house': ['deep', 'chill', 'smooth', 'laidback', 'warm'], - 'minimal': ['minimal', 'sparse', 'subtle', 'clean'], - 'drum-and-bass': ['heavy', 'dark', 'neuro', 'rolling', 'aggressive'], - } - - def enrich_query_with_genre(self, query: str, genre: str = "") -> str: - """MJ-06: Enrich a search query with genre-specific terms.""" - genre_lower = (genre or "").lower().strip() - terms = self.GENRE_SEARCH_TERMS.get(genre_lower, []) - if terms: - # Pick 2 random genre terms to enrich without overwhelming - import random as _rng - picked = _rng.sample(terms, min(2, len(terms))) - enriched = f"{query} {' '.join(picked)}" - logger.info(f"Enriched query for '{genre_lower}': '{query}' -> '{enriched}'") - return enriched - return query - - def semantic_search(self, query: str, limit: int = 5, max_duration: float = 0.0, genre: str = "") -> List[Dict]: - """ - Returns a list of metadata dicts sorted by semantic relevance down to the limit. - Fallback to basic substring matching if ML is unavailable. 
- - Args: - query: Semantic search terms - limit: Max results to return - max_duration: If > 0, filter out samples longer than this value - genre: Optional genre to enrich the search query (MJ-06) - """ - if not HAS_ML or self.model is None or len(self.embeddings) == 0: - logger.warning("ML unavailable, falling back to substring search.") - return self._fallback_search(query, limit, max_duration) - - # MJ-06: Enrich query with genre terms - effective_query = self.enrich_query_with_genre(query, genre) if genre else query - - logger.info(f"Performing semantic search for: '{effective_query}' (max_duration={max_duration})") - query_emb = self.model.encode([effective_query]) - - # Calculate cosine similarity between query and all stored embeddings - similarities = cosine_similarity(query_emb, self.embeddings)[0] - - # Apply duration and full-song penalties/filtering - adjusted_similarities = similarities.copy() - - for i, meta in enumerate(self.metadata): - # Filter out if it exceeds max_duration (if specified) - if max_duration > 0 and (meta.get('duration', 0) > max_duration or meta.get('duration', 0) < 0): - adjusted_similarities[i] = -1.0 - continue - - # Filter out explicit full songs - if meta.get('is_full_song', False) and max_duration > 0: - adjusted_similarities[i] = -1.0 - continue - - # Small penalty for longer samples if no max_duration specified - # to prioritize snippets over loops - if max_duration == 0 and meta.get('duration', 0) > 10.0: - adjusted_similarities[i] *= 0.9 - - # Get top indices from adjusted scores - top_indices = np.argsort(adjusted_similarities)[::-1][:limit] - - results = [] - for idx in top_indices: - score = float(adjusted_similarities[idx]) - if score < 0: # All remaining candidates are invalid - break - - meta = self.metadata[idx].copy() - meta['score'] = score - results.append(meta) - - return results - - def _fallback_search(self, query: str, limit: int = 5, max_duration: float = 0.0) -> List[Dict]: - query = query.lower() - scored 
= [] - for m in self.metadata: - # Duration filter - if max_duration > 0 and (m.get('duration', 0) > max_duration or m.get('duration', 0) < 0): - continue - if m.get('is_full_song', False) and max_duration > 0: - continue - - score = 0 - if query in m['name'].lower(): - score += 10 - if query in m['description'].lower(): - score += 5 - - if score > 0: - scored.append((score, m)) - - scored.sort(key=lambda x: x[0], reverse=True) - return [m for s, m in scored[:limit]] - -if __name__ == "__main__": - import sys - import argparse - - parser = argparse.ArgumentParser(description="Vector Manager for sample library indexing") - parser.add_argument("library_dir", nargs='?', help="Path to the sample library directory") - parser.add_argument("search_query", nargs='?', help="Optional search query to test") - parser.add_argument("--skip-audio-analysis", action="store_true", - help="Skip spectral audio analysis for faster rebuild (development mode)") - parser.add_argument("--rebuild", action="store_true", - help="Force rebuild of the index from scratch") - - args = parser.parse_args() - - if args.library_dir: - # Check if index exists and rebuild flag is set - index_file = Path(args.library_dir) / ".sample_embeddings.json" - if args.rebuild and index_file.exists(): - logger.info(f"Removing existing index for rebuild: {index_file}") - index_file.unlink() - - vm = VectorManager(args.library_dir, skip_audio_analysis=args.skip_audio_analysis) - - if args.search_query: - res = vm.semantic_search(args.search_query) - print(f"Search Results for '{args.search_query}':") - for r in res: - print(f" Score: {r['score']:.3f}") - print(f" Name: {r['name']}") - print(f" Path: {r['path']}") - print(f" Key: {r.get('key', 'N/A')} (confidence: {r.get('key_confidence', 0):.2f})") - print(f" Spectral Centroid: {r.get('spectral_centroid', 'N/A')}") - print(f" Is Harmonic: {r.get('is_harmonic', 'N/A')}") - print() - else: - # Print summary of the loaded index - print(f"\nIndex Summary:") - print(f" 
Total samples: {len(vm.metadata)}") - # Count samples with spectral data - with_key = sum(1 for m in vm.metadata if m.get('key') is not None) - with_centroid = sum(1 for m in vm.metadata if m.get('spectral_centroid') is not None) - print(f" Samples with key detected: {with_key}") - print(f" Samples with spectral centroid: {with_centroid}") - else: - print("Usage: python vector_manager.py [search_query] [--skip-audio-analysis] [--rebuild]") - print("\nOptions:") - print(" --skip-audio-analysis Skip spectral analysis for faster rebuild") - print(" --rebuild Force rebuild index from scratch") diff --git a/AbletonMCP_AI_BAK_20260328_200801/README.md b/AbletonMCP_AI_BAK_20260328_200801/README.md deleted file mode 100644 index e5570dc..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/README.md +++ /dev/null @@ -1,222 +0,0 @@ -# AbletonMCP-AI - -Sistema hibrido para controlar Ableton Live 12 desde MCP y generar proyectos musicales complejos, orientados a Arrangement View. - -Combina: - -- un Remote Script dentro de Live -- un servidor MCP en Python (52+ tools) -- seleccion de samples desde biblioteca local con busqueda semantica ML -- reconstruccion guiada por referencias -- fallback de audio y capas MIDI/instrumentos -- buses, returns y snapshots de mezcla por seccion -- mezcla harmonica Camelot wheel para DJ sets -- generacion Tech House DJ-ready con intro/outro extendidas - -Esta es la snapshot del proyecto al 2026-03-28. - -## Estado actual - -El sistema ya puede: - -- generar proyectos completos en Arrangement View con samples de la biblioteca local -- crear estructura, tracks, scenes, cue points y guide track -- combinar MIDI, instrumentos stock y audio de biblioteca local (827 samples indexados) -- analizar un track de referencia y reconstruir un resultado original inspirado en ese material -- materializar capas `AUDIO ...` con samples reales (kick, bass, synth, vocal, FX, etc.) 
-- aplicar snapshots por seccion a tracks y returns durante el commit Session -> Arrangement -- operar con returns desde el runtime y desde el MCP -- buses de mezcla (DRUM BUS, BASS BUS, MUSIC WIDE, VOCAL BUS, FX WASH) -- capas derivadas (RESAMPLE REVERSE FX, RISER, DOWNLIFTER, STUTTER) -- generar estructuras DJ-ready con intro/outro de 32 compases para beatmatching -- mezcla harmonica con Camelot wheel (compatible keys, sugerencias de transicion) -- auto-descubrir tracks de referencia desde `librerias/reference/` -- previsualizar blueprints sin crear nada en Ableton -- regenerar secciones individuales -- persistir historia de generaciones y diversidad de samples entre sesiones -- busqueda semantica enriquecida por genero (tech-house, house, techno, trance, etc.) -- auto-reindexar la biblioteca cuando cambian los archivos -- validar automaticamente el set post-generacion - -## Arquitectura resumida - -1. `__init__.py` - Remote Script principal. Vive dentro de Ableton, abre el socket TCP y ejecuta comandos sobre la API de Live. -2. `MCP_Server/server.py` - Servidor MCP/FastMCP. Expone tools, normaliza aliases y habla con el Remote Script. -3. `MCP_Server/song_generator.py` - Generador musical. Construye blueprint de tracks, sections, performance, locators y returns. -4. `MCP_Server/reference_listener.py` - Escucha el audio de referencia y arma un plan de reconstruccion usando la biblioteca local. -5. `MCP_Server/sample_manager.py`, `sample_selector.py`, `audio_analyzer.py` - Indexado, busqueda, scoring y analisis de samples. -6. `MaxForLive/` - Devices `.amxd` para la ruta hibrida con M4L. 
- -## Layout del repo - -```text -AbletonMCP_AI/ -|-- __init__.py -|-- Remote_Script.py -|-- start_server.bat -|-- .mcp.json -|-- README.md -|-- CLAUDE.md -|-- MaxForLive/ -| |-- AbletonMCP_Engine.amxd -| |-- AbletonMCP_Engine.maxpat -| `-- AbletonMCP_SamplerPro.amxd -|-- MCP_Server/ -| |-- server.py -| |-- song_generator.py -| |-- reference_listener.py -| |-- audio_analyzer.py -| |-- sample_manager.py -| |-- sample_selector.py -| |-- sample_index.py -| |-- socket_smoke_test.py -| |-- template_analyzer.py -| |-- ABLETUNES_TEMPLATE_NOTES.md -| `-- requirements.txt -`-- docs/ - |-- AI_HANDOFF.md - |-- ARCHITECTURE.md - |-- GPU_DIRECTML.md - |-- MCP_TOOLS.md - |-- PROJECT_CONTEXT.md - |-- REMOTE_PROTOCOL.md - `-- SETUP_WINDOWS.md -``` - -## Documentacion - -Leer primero: - -- [CLAUDE.md](CLAUDE.md) - handoff amplio, cronologia completa, estado real, paths y notas operativas -- [AI_HANDOFF](docs/AI_HANDOFF.md) - handoff corto y operativo -- [PROJECT_CONTEXT](docs/PROJECT_CONTEXT.md) - direccion de producto y lecciones aprendidas - -- [Arquitectura](docs/ARCHITECTURE.md) -- [Setup en Windows + Ableton](docs/SETUP_WINDOWS.md) -- [Tools MCP](docs/MCP_TOOLS.md) -- [Protocolo del Remote Script](docs/REMOTE_PROTOCOL.md) -- [GPU DirectML](docs/GPU_DIRECTML.md) -- [Notas del analisis de templates Abletunes](MCP_Server/ABLETUNES_TEMPLATE_NOTES.md) - -## Quick start - -### 1. Clonar y colocar en la carpeta de Ableton - -```powershell -# Clonar el repo -git clone https://gitea.cbcren.online/renato97/ableton-mcp-ai.git - -# Copiar a la carpeta de MIDI Remote Scripts -cp -r ableton-mcp-ai "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI" -``` - -### 2. Instalar dependencias Python - -```powershell -cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server" -python -m pip install -r requirements.txt -``` - -### 3. Seleccionar el Control Surface en Live - -- Abrir Ableton Live 12. 
-- Ir a `Preferences > Link/Tempo/MIDI`. -- Elegir `AbletonMCP_AI` como `Control Surface`. - -### 4. Levantar el servidor MCP - -```powershell -cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI" -python MCP_Server/server.py -``` - -O: - -```powershell -start_server.bat -``` - -### 5. Probar conexion - -```powershell -cd "C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\MCP_Server" -python socket_smoke_test.py -``` - -## Ubicaciones externas esperadas - -Este repo no incluye bibliotecas pesadas ni material generado. El stack espera estos recursos fuera del repo: - -- biblioteca principal de samples (organizada por categorias): - `..\librerias\organized_samples` (827 samples indexados) -- biblioteca raw original: - `..\librerias\all_tracks` -- vector store para matching ML: - `..\librerias\vector_store` -- Ableton User Library para instalar el sampler M4L: - `%USERPROFILE%\Documents\Ableton\User Library` -- referencias MP3/WAV que se quieran analizar: - `..\sample` -- proyectos `.als`, renders y stems - -## Flujo recomendado - -1. Resetear el set. -2. Generar un track desde MCP o por socket. -3. Validar que el commit termine en Arrangement View. -4. Revisar audio tracks `AUDIO ...` y returns. -5. Ajustar perfiles, matching y snapshots. 
- -## Comandos utiles - -Generacion completa: - -```text -generate_track(genre="tech-house", style="latin-industrial", bpm=0, key="", structure="standard") -generate_track(genre="tech-house", style="groovy", bpm=126, key="Am", structure="tech-house-dj") -generate_song(genre="tech-house", style="latin-industrial", bpm=0, key="", structure="club") -``` - -DJ / Harmonic mixing: - -```text -get_harmonic_keys(key="Am") -get_compatible_keys(key="Am") -export_stems_config() -discover_reference_track() -get_reference_suggestions() -``` - -Utilidades de generacion: - -```text -preview_generation(genre="tech-house", style="groovy", bpm=126, key="Am", structure="tech-house-dj") -regenerate_section(section_name="DROP A") -get_generation_history() -``` - -Transporte: - -```text -start_playback() -stop_playback() -set_tempo(126) -``` - -Samples: - -```text -search_samples("kick", category="kick", limit=10) -advanced_search_samples(query="vocal", category="vocals", bpm=128, key="F#m") -analyze_audio_file("C:\\ruta\\track.mp3") -``` - -## Licencia - -Sin licencia publicada por ahora. Tratar este repo como privado/interno hasta definirla. 
diff --git a/AbletonMCP_AI_BAK_20260328_200801/Remote_Script.py b/AbletonMCP_AI_BAK_20260328_200801/Remote_Script.py deleted file mode 100644 index d78f91c..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/Remote_Script.py +++ /dev/null @@ -1,943 +0,0 @@ -""" -AbletonMCP AI - Remote Script para Ableton Live 12 -Integración completa con MCP para generación musical por IA - -Este script debe copiarse a: -C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\ - -Y luego seleccionarse en Preferencias > Link/Tempo/MIDI > Control Surface -""" -from __future__ import absolute_import, print_function, unicode_literals - -from _Framework.ControlSurface import ControlSurface -import socket -import json -import threading -import time -import traceback -import os -import hashlib - -# Python 2/3 compatibility -try: - import queue -except ImportError: - pass - -try: - string_types = basestring -except NameError: - string_types = str - -# Configuración -DEFAULT_PORT = 9877 -HOST = "localhost" -CONFIG_FILE = r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\AbletonMCP_AI\track_config.json" - - -def create_instance(c_instance): - """Crea y retorna la instancia del script""" - return AbletonMCP_AI(c_instance) - - -class AbletonMCP_AI(ControlSurface): - """ - Remote Script para integración MCP + AI con Ableton Live 12 - - Características: - - Servidor socket para comunicación con MCP Server - - Generación de tracks MIDI con patrones automáticos - - Carga de samples vía browser - - Integración con análisis de audio por IA - """ - - def __init__(self, c_instance): - ControlSurface.__init__(self, c_instance) - self.log_message("=" * 60) - self.log_message("AbletonMCP AI - Inicializando...") - self.log_message("=" * 60) - - # Referencia a la canción - self._song = self.song() - - # Servidor socket - self.server = None - self.client_threads = [] - self.server_thread = None - self.running = False - - # Config watcher para generación automática - 
self._last_config_hash = None - self._config_watcher_thread = None - self._config_watcher_running = False - - # Iniciar servidor - self.start_server() - - # Iniciar watcher de configuración - self.start_config_watcher() - - self.log_message("AbletonMCP AI inicializado correctamente") - self.show_message("AbletonMCP AI: Listo en puerto " + str(DEFAULT_PORT)) - - def disconnect(self): - """Llamado cuando Ableton cierra o se remueve el script""" - self.log_message("AbletonMCP AI desconectando...") - self.running = False - self._config_watcher_running = False - - # Detener servidor - if self.server: - try: - self.server.close() - except Exception: - pass - - # Esperar threads - if self.server_thread and self.server_thread.is_alive(): - self.server_thread.join(1.0) - - if self._config_watcher_thread and self._config_watcher_thread.is_alive(): - self._config_watcher_thread.join(0.5) - - ControlSurface.disconnect(self) - self.log_message("AbletonMCP AI desconectado") - - # ========================================================================= - # SERVIDOR SOCKET - # ========================================================================= - - def start_server(self): - """Inicia el servidor socket en un thread separado""" - try: - self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.server.bind((HOST, DEFAULT_PORT)) - self.server.listen(5) - - self.running = True - self.server_thread = threading.Thread(target=self._server_thread) - self.server_thread.daemon = True - self.server_thread.start() - - self.log_message("Servidor socket iniciado en puerto " + str(DEFAULT_PORT)) - except Exception as e: - self.log_message("Error iniciando servidor: " + str(e)) - self.show_message("AbletonMCP AI Error: " + str(e)) - - def _server_thread(self): - """Thread principal del servidor - maneja conexiones""" - try: - self.server.settimeout(1.0) - - while self.running: - try: - client, address = 
self.server.accept() - self.log_message("Conexión aceptada de " + str(address)) - - # Manejar cliente en thread separado - client_thread = threading.Thread( - target=self._handle_client, - args=(client,) - ) - client_thread.daemon = True - client_thread.start() - - self.client_threads.append(client_thread) - - # Limpiar threads terminados - self.client_threads = [t for t in self.client_threads if t.is_alive()] - - except socket.timeout: - continue - except Exception as e: - if self.running: - self.log_message("Error servidor: " + str(e)) - time.sleep(0.5) - - except Exception as e: - self.log_message("Error thread servidor: " + str(e)) - - def _handle_client(self, client): - """Maneja comunicación con un cliente conectado""" - client.settimeout(None) - buffer = '' - - try: - while self.running: - try: - data = client.recv(8192) - - if not data: - self.log_message("Cliente desconectado") - break - - # Acumular en buffer - try: - buffer += data.decode('utf-8') - except AttributeError: - buffer += data - - # Intentar parsear JSON - try: - command = json.loads(buffer) - buffer = '' - - self.log_message("Comando recibido: " + str(command.get("type", "unknown"))) - - # Procesar comando - response = self._process_command(command) - - # Enviar respuesta - try: - client.sendall(json.dumps(response).encode('utf-8')) - except AttributeError: - client.sendall(json.dumps(response)) - - except ValueError: - # Datos incompletos, esperar más - continue - - except Exception as e: - self.log_message("Error manejando cliente: " + str(e)) - error_response = {"status": "error", "message": str(e)} - try: - client.sendall(json.dumps(error_response).encode('utf-8')) - except Exception: - pass - break - - finally: - try: - client.close() - except Exception: - pass - - # ========================================================================= - # CONFIG WATCHER - Generación automática - # ========================================================================= - - def 
start_config_watcher(self): - """Inicia el watcher de configuración para generación automática""" - self._config_watcher_running = True - self._config_watcher_thread = threading.Thread(target=self._config_watcher_loop) - self._config_watcher_thread.daemon = True - self._config_watcher_thread.start() - self.log_message("Config watcher iniciado") - - def _config_watcher_loop(self): - """Loop que monitorea cambios en el archivo de configuración""" - while self._config_watcher_running: - try: - if os.path.exists(CONFIG_FILE): - with open(CONFIG_FILE, 'r') as f: - content = f.read() - - h = hashlib.md5(content.encode()).hexdigest() - if h != self._last_config_hash: - self._last_config_hash = h - self.log_message("Config cambiado - generando track...") - - try: - config = json.loads(content) - # Solo procesar si tiene flag 'auto_generate' - if config.get('auto_generate', False): - self._generate_from_config(config) - except Exception as e: - self.log_message("Error generando desde config: " + str(e)) - self.log_message(traceback.format_exc()) - - time.sleep(1.0) # Revisar cada segundo - - except Exception as e: - self.log_message("Error en config watcher: " + str(e)) - time.sleep(2.0) - - def _generate_from_config(self, config): - """Genera un track completo desde una configuración""" - try: - self.show_message("AI: Generando " + config.get('name', 'Track')) - - # 1. Limpiar proyecto existente - self._clear_all_tracks() - - # 2. Setear BPM - bpm = config.get('bpm', 128) - self._song.tempo = bpm - - # 3. 
Crear tracks según configuración - tracks_config = config.get('tracks', []) - - for idx, track_cfg in enumerate(tracks_config): - track_type = track_cfg.get('type', 'midi') - name = track_cfg.get('name', 'Track ' + str(idx)) - - if track_type == 'midi': - self._song.create_midi_track(idx) - elif track_type == 'audio': - self._song.create_audio_track(idx) - - track = self._song.tracks[idx] - track.name = name - - # Setear color si existe - if 'color' in track_cfg: - track.color = track_cfg['color'] - - # Crear clip con notas si existe configuración - if 'clip' in track_cfg: - clip_cfg = track_cfg['clip'] - slot_idx = clip_cfg.get('slot', 0) - length = clip_cfg.get('length', 4.0) - - # Asegurar que existan suficientes scenes - while len(self._song.scenes) <= slot_idx: - self._song.create_scene(-1) - - clip_slot = track.clip_slots[slot_idx] - clip_slot.create_clip(length) - - # Agregar notas - if 'notes' in clip_cfg: - clip = clip_slot.clip - for note in clip_cfg['notes']: - pitch = note.get('pitch', 60) - start = note.get('start', 0.0) - duration = note.get('duration', 0.25) - velocity = note.get('velocity', 100) - clip.add_new_note((pitch, start, duration, velocity, False)) - - # Cargar instrumento si se especifica - if 'instrument' in track_cfg: - instrument_name = track_cfg['instrument'] - # Usar browser para cargar - self._load_instrument_by_name(track, instrument_name) - - self.show_message("AI: Track generado exitosamente!") - self.log_message("Generación completada: " + str(len(tracks_config)) + " tracks") - - except Exception as e: - self.log_message("Error en generación: " + str(e)) - self.log_message(traceback.format_exc()) - self.show_message("AI Error: " + str(e)) - - def _clear_all_tracks(self): - """Elimina todos los tracks existentes""" - try: - while len(self._song.tracks) > 0: - self._song.delete_track(len(self._song.tracks) - 1) - except Exception as e: - self.log_message("Error limpiando tracks: " + str(e)) - - def _load_instrument_by_name(self, 
track, name): - """Carga un instrumento en el track por nombre""" - try: - browser = self.application().browser - - # Buscar en categorías de instrumentos - if hasattr(browser, 'instruments'): - for item in self._search_browser_items(browser.instruments, name): - try: - browser.load_item(item) - self.log_message("Instrumento cargado: " + name) - return True - except Exception as e: - self.log_message("Error cargando instrumento: " + str(e)) - - return False - except Exception as e: - self.log_message("Error buscando instrumento: " + str(e)) - return False - - def _search_browser_items(self, root, name, depth=0, max_depth=5): - """Busca items en el browser recursivamente""" - if depth > max_depth or root is None: - return [] - - results = [] - try: - # Verificar si el nombre coincide - item_name = getattr(root, 'name', '').lower() - if name.lower() in item_name or item_name in name.lower(): - results.append(root) - - # Buscar en hijos - if hasattr(root, 'children'): - for child in root.children: - results.extend(self._search_browser_items(child, name, depth + 1, max_depth)) - except Exception: - pass - - return results - - # ========================================================================= - # PROCESAMIENTO DE COMANDOS - # ========================================================================= - - def _process_command(self, command): - """Procesa un comando recibido y retorna respuesta""" - command_type = command.get("type", "") - params = command.get("params", {}) - - try: - # Comandos de información - if command_type == "get_session_info": - return self._cmd_get_session_info() - - elif command_type == "get_track_info": - return self._cmd_get_track_info(params) - - elif command_type == "get_tracks": - return self._cmd_get_tracks() - - # Comandos de tracks - elif command_type == "create_midi_track": - return self._cmd_create_midi_track(params) - - elif command_type == "create_audio_track": - return self._cmd_create_audio_track(params) - - elif command_type 
== "set_track_name": - return self._cmd_set_track_name(params) - - elif command_type == "set_track_volume": - return self._cmd_set_track_volume(params) - - elif command_type == "set_track_pan": - return self._cmd_set_track_pan(params) - - elif command_type == "set_track_mute": - return self._cmd_set_track_mute(params) - - elif command_type == "set_track_solo": - return self._cmd_set_track_solo(params) - - elif command_type == "set_track_color": - return self._cmd_set_track_color(params) - - # Comandos de clips - elif command_type == "create_clip": - return self._cmd_create_clip(params) - - elif command_type == "add_notes_to_clip": - return self._cmd_add_notes_to_clip(params) - - elif command_type == "set_clip_name": - return self._cmd_set_clip_name(params) - - elif command_type == "set_clip_envelope": - return self._cmd_set_clip_envelope(params) - - elif command_type == "fire_clip": - return self._cmd_fire_clip(params) - - elif command_type == "stop_clip": - return self._cmd_stop_clip(params) - - # Comandos de transporte - elif command_type == "set_tempo": - return self._cmd_set_tempo(params) - - elif command_type == "start_playback": - return self._cmd_start_playback() - - elif command_type == "stop_playback": - return self._cmd_stop_playback() - - # Comandos de escenas - elif command_type == "create_scene": - return self._cmd_create_scene(params) - - elif command_type == "set_scene_name": - return self._cmd_set_scene_name(params) - - elif command_type == "fire_scene": - return self._cmd_fire_scene(params) - - # Comandos de dispositivos - elif command_type == "load_instrument_or_effect": - return self._cmd_load_instrument(params) - - elif command_type == "set_device_parameter": - return self._cmd_set_device_parameter(params) - - # Comando de generación AI - elif command_type == "generate_track": - return self._cmd_generate_track(params) - - else: - return {"status": "error", "message": "Comando desconocido: " + command_type} - - except Exception as e: - 
self.log_message("Error procesando comando " + command_type + ": " + str(e)) - self.log_message(traceback.format_exc()) - return {"status": "error", "message": str(e)} - - # ========================================================================= - # IMPLEMENTACIÓN DE COMANDOS - # ========================================================================= - - def _cmd_get_session_info(self): - """Retorna información de la sesión actual""" - return { - "status": "success", - "result": { - "tempo": self._song.tempo, - "signature_numerator": self._song.signature_numerator, - "signature_denominator": self._song.signature_denominator, - "is_playing": self._song.is_playing, - "current_song_time": self._song.current_song_time, - "loop_start": self._song.loop_start, - "loop_length": self._song.loop_length, - "num_tracks": len(self._song.tracks), - "num_scenes": len(self._song.scenes), - "num_return_tracks": len(self._song.return_tracks) - } - } - - def _cmd_get_track_info(self, params): - """Retorna información de un track específico""" - idx = params.get("track_index", 0) - if idx < 0 or idx >= len(self._song.tracks): - return {"status": "error", "message": "Track index fuera de rango"} - - track = self._song.tracks[idx] - - # Determinar tipo de track - track_type = "unknown" - if track.has_midi_input: - track_type = "midi" - elif track.has_audio_input: - track_type = "audio" - - return { - "status": "success", - "result": { - "index": idx, - "name": track.name, - "type": track_type, - "color": track.color, - "mute": track.mute, - "solo": track.solo, - "arm": track.arm, - "volume": track.mixer_device.volume.value if track.mixer_device else 0.85, - "pan": track.mixer_device.panning.value if track.mixer_device else 0.0, - "num_clips": len(track.clip_slots), - "num_devices": len(track.devices) - } - } - - def _cmd_get_tracks(self): - """Retorna lista de todos los tracks""" - tracks = [] - for i, track in enumerate(self._song.tracks): - track_type = "midi" if 
track.has_midi_input else "audio" if track.has_audio_input else "unknown" - tracks.append({ - "index": i, - "name": track.name, - "type": track_type, - "color": track.color, - "mute": track.mute, - "solo": track.solo - }) - - return {"status": "success", "result": tracks} - - def _cmd_create_midi_track(self, params): - """Crea un track MIDI""" - index = params.get("index", -1) - self._song.create_midi_track(index) - return {"status": "success", "result": {"message": "MIDI track creado", "index": index}} - - def _cmd_create_audio_track(self, params): - """Crea un track de audio""" - index = params.get("index", -1) - self._song.create_audio_track(index) - return {"status": "success", "result": {"message": "Audio track creado", "index": index}} - - def _cmd_set_track_name(self, params): - """Setea el nombre de un track""" - idx = params.get("track_index", 0) - name = params.get("name", "Track") - self._song.tracks[idx].name = name - return {"status": "success", "result": {"message": "Nombre actualizado", "name": name}} - - def _cmd_set_track_volume(self, params): - """Setea el volumen de un track""" - idx = params.get("track_index", 0) - volume = params.get("volume", 0.85) - track = self._song.tracks[idx] - if track.mixer_device and track.mixer_device.volume: - track.mixer_device.volume.value = volume - return {"status": "success"} - - def _cmd_set_track_pan(self, params): - """Setea el pan de un track""" - idx = params.get("track_index", 0) - pan = params.get("pan", 0.0) - track = self._song.tracks[idx] - if track.mixer_device and track.mixer_device.panning: - track.mixer_device.panning.value = pan - return {"status": "success"} - - def _cmd_set_track_mute(self, params): - """Setea el mute de un track""" - idx = params.get("track_index", 0) - mute = params.get("mute", True) - track = self._song.tracks[idx] - current_mute = track.mute - if current_mute != mute: - track.mute = mute - return {"status": "success", "result": {"mute": track.mute, "track_index": idx}} - - 
def _cmd_set_track_solo(self, params): - """Setea el solo de un track""" - idx = params.get("track_index", 0) - solo = params.get("solo", True) - self._song.tracks[idx].solo = solo - return {"status": "success"} - - def _cmd_set_track_color(self, params): - """Setea el color de un track""" - idx = params.get("track_index", 0) - color = params.get("color", 0) - self._song.tracks[idx].color = color - return {"status": "success"} - - def _cmd_create_clip(self, params): - """Crea un clip en un slot""" - track_idx = params.get("track_index", 0) - clip_idx = params.get("clip_index", 0) - length = params.get("length", 4.0) - - track = self._song.tracks[track_idx] - - # Asegurar que existan suficientes scenes - while len(self._song.scenes) <= clip_idx: - self._song.create_scene(-1) - - clip_slot = track.clip_slots[clip_idx] - clip_slot.create_clip(length) - - return {"status": "success", "result": {"message": "Clip creado"}} - - def _cmd_add_notes_to_clip(self, params): - """Agrega notas a un clip MIDI""" - track_idx = params.get("track_index", 0) - clip_idx = params.get("clip_index", 0) - notes = params.get("notes", []) - - track = self._song.tracks[track_idx] - clip_slot = track.clip_slots[clip_idx] - - if not clip_slot.has_clip: - return {"status": "error", "message": "No hay clip en este slot"} - - clip = clip_slot.clip - - for note in notes: - pitch = note.get("pitch", 60) - start = note.get("start", 0.0) - duration = note.get("duration", 0.25) - velocity = note.get("velocity", 100) - clip.add_new_note((pitch, start, duration, velocity, False)) - - return {"status": "success", "result": {"num_notes_added": len(notes)}} - - def _cmd_set_clip_name(self, params): - """Setea el nombre de un clip""" - track_idx = params.get("track_index", 0) - clip_idx = params.get("clip_index", 0) - name = params.get("name", "Clip") - - clip_slot = self._song.tracks[track_idx].clip_slots[clip_idx] - if clip_slot.has_clip: - clip_slot.clip.name = name - - return {"status": "success"} - - 
def _cmd_fire_clip(self, params): - """Dispara un clip""" - track_idx = params.get("track_index", 0) - clip_idx = params.get("clip_index", 0) - - clip_slot = self._song.tracks[track_idx].clip_slots[clip_idx] - clip_slot.fire() - - return {"status": "success"} - - def _cmd_stop_clip(self, params): - """Detiene un clip""" - track_idx = params.get("track_index", 0) - clip_idx = params.get("clip_index", 0) - - clip_slot = self._song.tracks[track_idx].clip_slots[clip_idx] - clip_slot.stop() - - return {"status": "success"} - - def _cmd_set_tempo(self, params): - """Setea el BPM""" - tempo = params.get("tempo", 120.0) - self._song.tempo = tempo - return {"status": "success", "result": {"tempo": tempo}} - - def _cmd_start_playback(self): - """Inicia reproducción""" - self._song.start_playing() - return {"status": "success"} - - def _cmd_stop_playback(self): - """Detiene reproducción""" - self._song.stop_playing() - return {"status": "success"} - - def _cmd_create_scene(self, params): - """Crea una scene""" - index = params.get("index", -1) - self._song.create_scene(index) - return {"status": "success"} - - def _cmd_set_scene_name(self, params): - """Setea el nombre de una scene""" - idx = params.get("scene_index", 0) - name = params.get("name", "Scene") - self._song.scenes[idx].name = name - return {"status": "success"} - - def _cmd_fire_scene(self, params): - """Dispara una scene""" - idx = params.get("scene_index", 0) - scene = self._song.scenes[idx] - scene.fire() - - if not self._song.is_playing: - self._song.start_playing() - - return {"status": "success"} - - def _cmd_load_instrument(self, params): - """Carga un instrumento en un track""" - track_idx = params.get("track_index", 0) - name = params.get("name", "") - - track = self._song.tracks[track_idx] - success = self._load_instrument_by_name(track, name) - - if success: - return {"status": "success", "result": {"message": "Instrumento cargado"}} - else: - return {"status": "error", "message": "No se pudo cargar el 
instrumento"} - - def _cmd_set_device_parameter(self, params): - """Setea un parámetro de dispositivo""" - track_idx = params.get("track_index", 0) - device_idx = params.get("device_index", 0) - param_idx = params.get("parameter_index", 0) - value = params.get("value", 0.0) - - track = self._song.tracks[track_idx] - device = track.devices[device_idx] - param = device.parameters[param_idx] - param.value = value - - return {"status": "success"} - - def _cmd_generate_track(self, params): - """Comando principal de generación de tracks""" - # Este comando delega a _generate_from_config - # pero puede ser llamado directamente vía socket - try: - self._generate_from_config(params) - return {"status": "success", "result": {"message": "Track generado exitosamente"}} - except Exception as e: - return {"status": "error", "message": str(e)} - - def _cmd_set_clip_envelope(self, params): - """Setea un envelope (volume, pan, send) en un clip con puntos de automatización""" - track_idx = params.get("track_index", 0) - clip_idx = params.get("clip_index", 0) - envelope_name = params.get("envelope", "volume") # volume, pan, send - points = params.get("points", []) - - track = self._song.tracks[track_idx] - clip_slot = track.clip_slots[clip_idx] - - if not clip_slot.has_clip: - return {"status": "error", "message": "No hay clip en este slot"} - - clip = clip_slot.clip - - # Obtener el envelope correcto - if envelope_name == "volume": - envelope = clip.volume_envelope - elif envelope_name == "pan": - envelope = clip.pan_envelope - elif envelope_name == "send": - send_idx = params.get("send_index", 0) - if send_idx < len(track.mixer_device.sends): - envelope = track.mixer_device.sends[send_idx].envelope - else: - return {"status": "error", "message": "Send index fuera de rango"} - else: - return {"status": "error", "message": "Envelope type desconocido: " + envelope_name} - - # Limpiar puntos existentes si se especifica - clear_existing = params.get("clear_existing", False) - if 
clear_existing: - while len(envelope.points) > 0: - envelope.delete_point(len(envelope.points) - 1) - - # Agregar puntos de automatización desde el array de puntos - if points: - for point in points: - if isinstance(point, dict): - time_pos = point.get("time", 0.0) - value = point.get("value", 0.0) - envelope.add_new_point(time_pos, value) - return {"status": "success", "result": {"message": "Envelope seteado con puntos", "points_added": len(points)}} - else: - return {"status": "error", "message": "No se especificaron puntos de automatización"} - - def _cmd_calibrate_track_gain(self, params): - """Calibra el gain de un track basado en loudness""" - track_idx = params.get("track_index", 0) - target_loudness = params.get("target_loudness", -14.0) # LUFS target - measurement_window = params.get("measurement_window", 0.1) # segundos - - track = self._song.tracks[track_idx] - if not track.has_audio_input: - return {"status": "error", "message": "Track no es de audio"} - - # Obtener el peak volume actual - current_volume = track.mixer_device.volume.value - - # Calibrar para alcanzar el target (simplificado) - # En una implementación real, usaríamos análisis de loudness real - # Por ahora, ajustamos proporcionalmente - adjustment = target_loudness / -20.0 # Aproximación - new_volume = max(0.0, min(1.0, current_volume * adjustment)) - - track.mixer_device.volume.value = new_volume - - return { - "status": "success", - "result": { - "message": "Gain calibrado", - "current_volume": current_volume, - "new_volume": new_volume, - "target_loudness": target_loudness - } - } - - def _cmd_apply_compression(self, params): - """Aplica compresión a un track""" - track_idx = params.get("track_index", 0) - threshold = params.get("threshold", -24.0) - ratio = params.get("ratio", 4.0) - attack = params.get("attack", 0.01) - release = params.get("release", 0.1) - - track = self._song.tracks[track_idx] - - # Buscar o crear compressor - compressor = None - for device in track.devices: - if 
device.name == "Compressor": - compressor = device - break - - if compressor is None: - # Intentar cargar Compressor desde browser - browser = self.application().browser - for item in self._search_browser_items(browser.effects, "Compressor"): - try: - browser.load_item(item) - compressor = track.devices[-1] - break - except Exception: - pass - - if compressor: - # Setear parámetros (índices pueden variar según versión) - try: - if len(compressor.parameters) > 0: - compressor.parameters[0].value = threshold # Threshold - if len(compressor.parameters) > 1: - compressor.parameters[1].value = ratio # Ratio - if len(compressor.parameters) > 2: - compressor.parameters[2].value = attack # Attack - if len(compressor.parameters) > 3: - compressor.parameters[3].value = release # Release - except Exception: - pass - - return {"status": "success", "result": {"message": "Compresor aplicado"}} - else: - return {"status": "error", "message": "No se pudo cargar compresor"} - - def _cmd_apply_limiting(self, params): - """Aplica limiting para loudness normalization""" - track_idx = params.get("track_index", 0) - target_loudness = params.get("target_loudness", -1.0) # LUFS para master - lookahead = params.get("lookahead", 0.01) - release = params.get("release", 0.05) - - track = self._song.tracks[track_idx] - - # Buscar o crear limiter - limiter = None - for device in track.devices: - if "Limiter" in device.name: - limiter = device - break - - if limiter is None: - # Intentar cargar Limiter desde browser - browser = self.application().browser - for item in self._search_browser_items(browser.effects, "Limiter"): - try: - browser.load_item(item) - limiter = track.devices[-1] - break - except Exception: - pass - - if limiter: - # Setear parámetros - try: - if len(limiter.parameters) > 0: - limiter.parameters[0].value = target_loudness # Gain - if len(limiter.parameters) > 1: - limiter.parameters[1].value = lookahead # Lookahead - if len(limiter.parameters) > 2: - 
limiter.parameters[2].value = release # Release - except Exception: - pass - - return {"status": "success", "result": {"message": "Limiter aplicado"}} - else: - return {"status": "error", "message": "No se pudo cargar limiter"} - - def _cmd_master_loudness_normalization(self, params): - """Normaliza el loudness del master track""" - track_idx = params.get("track_index", 0) - target_loudness = params.get("target_loudness", -14.0) - - track = self._song.tracks[track_idx] - - # Calibrar gain - current_volume = track.mixer_device.volume.value - adjustment = 10 ** ((target_loudness - (-14)) / 20) # Aproximación - new_volume = max(0.0, min(1.0, current_volume * adjustment)) - - track.mixer_device.volume.value = new_volume - - return { - "status": "success", - "result": { - "message": "Loudness normalizado", - "target_loudness": target_loudness, - "new_volume": new_volume - } - } diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.example.json b/AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.example.json deleted file mode 100644 index 285238e..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.example.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "implementer": { - "description": "Implements the requested code changes with minimal diff.", - "prompt": "You are a focused implementation worker. Make the requested code changes, keep the diff small, and do not overclaim." - }, - "verifier": { - "description": "Runs validations and checks whether the claimed work is actually complete.", - "prompt": "You are a strict verifier. Run the requested validations, compare code against claims, and report gaps clearly." - }, - "reporter": { - "description": "Writes the final worker report truthfully.", - "prompt": "You are a truthful technical reporter. Summarize only what was actually changed and verified." 
- } -} diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.team.json b/AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.team.json deleted file mode 100644 index 65aac2f..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/glm_agents.team.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "planner": { - "description": "Breaks the task into a small, realistic execution plan and identifies the critical path.", - "prompt": "You are the planning agent. Read the task, identify the minimum safe plan, and tell the team what to implement first. Keep the plan concrete and short." - }, - "implementer_core": { - "description": "Implements the main code changes with a minimal diff.", - "prompt": "You are the core implementation agent. Make the requested code changes with the smallest coherent diff. Do not overclaim." - }, - "implementer_aux": { - "description": "Implements helper scripts, manifests, reports, and offline tooling.", - "prompt": "You are the auxiliary implementation agent. Focus on CLI helpers, manifests, reports, and utility scripts. Keep changes isolated." - }, - "validator": { - "description": "Runs validations and checks whether the implementation actually works.", - "prompt": "You are the validation agent. Run the required validations, inspect failures carefully, and report only what really passed." - }, - "retrieval_reviewer": { - "description": "Reviews retrieval/indexing logic for role contamination, cache compatibility, and data-shape issues.", - "prompt": "You are the retrieval reviewer. Inspect role safety, cache compatibility, manifests, and offline retrieval quality. Flag contamination and schema mismatches." - }, - "runtime_guard": { - "description": "Protects the Ableton runtime and blocks risky unrelated changes.", - "prompt": "You are the runtime guard. Prevent unnecessary edits to the Remote Script, runtime socket behavior, or generation path when the task does not require it." 
- }, - "reporter": { - "description": "Writes the final task report truthfully and concisely.", - "prompt": "You are the reporting agent. Write a technical report that only claims what was truly changed and verified." - } -} diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/invoke_codex_review.ps1 b/AbletonMCP_AI_BAK_20260328_200801/automation/invoke_codex_review.ps1 deleted file mode 100644 index f34456c..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/invoke_codex_review.ps1 +++ /dev/null @@ -1,94 +0,0 @@ -param( - [Parameter(Mandatory = $true)] - [string]$TaskFile, - - [Parameter(Mandatory = $true)] - [string]$ReportFile, - - [Parameter(Mandatory = $true)] - [string]$ProjectRoot, - - [Parameter(Mandatory = $true)] - [string]$OutputFile, - - [string]$CodexModel = "" -) - -$ErrorActionPreference = "Stop" - -function Resolve-CodexCommand() { - $cmd = Get-Command "codex.cmd" -ErrorAction SilentlyContinue - if ($cmd) { - return $cmd.Source - } - - $fallback = Get-Command "codex" -ErrorAction SilentlyContinue - if ($fallback) { - return $fallback.Source - } - - throw "Command not found: codex" -} - -$taskPath = (Resolve-Path -LiteralPath $TaskFile).Path -$reportPath = (Resolve-Path -LiteralPath $ReportFile).Path -$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path -$outputPath = [System.IO.Path]::GetFullPath($OutputFile) -$codexCommand = Resolve-CodexCommand - -$reviewPrompt = @" -Read this worker task file: -$taskPath - -Read this GLM report: -$reportPath - -Your job: -1. Inspect the real diff in the repository. -2. Verify whether GLM actually implemented what the report claims. -3. Fix anything incorrect, incomplete, or unsafe. -4. Run the relevant validations mentioned by the task/report. -5. Leave the repository in the best corrected state you can reach in one pass. -6. Write a concise final summary to the output file configured by the CLI. - -Be strict about overclaims. The code is the source of truth, not the report. 
-"@ - -$codexArgs = @( - "exec", - "--dangerously-bypass-approvals-and-sandbox", - "-C", $projectPath, - "-o", $outputPath -) - -if (-not [string]::IsNullOrWhiteSpace($CodexModel)) { - $codexArgs += @("-m", $CodexModel) -} - -$codexArgs += $reviewPrompt - -$stdoutPath = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($outputPath), "codex_review_stdout.tmp.txt") -$stderrPath = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($outputPath), "codex_review_stderr.tmp.txt") - -if (Test-Path -LiteralPath $stdoutPath) { Remove-Item -LiteralPath $stdoutPath -Force } -if (Test-Path -LiteralPath $stderrPath) { Remove-Item -LiteralPath $stderrPath -Force } - -Push-Location $projectPath -try { - & $codexCommand @codexArgs 1> $stdoutPath 2> $stderrPath - $exitCode = $LASTEXITCODE -} -finally { - Pop-Location -} - -if (Test-Path -LiteralPath $stdoutPath) { - Get-Content -LiteralPath $stdoutPath -} -if (Test-Path -LiteralPath $stderrPath) { - Get-Content -LiteralPath $stderrPath -} - -if ($exitCode -ne 0) { - throw "Codex exited with code $exitCode" -} diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/reports/glm_task_001_benchmark_check.json b/AbletonMCP_AI_BAK_20260328_200801/automation/reports/glm_task_001_benchmark_check.json deleted file mode 100644 index 216df61..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/reports/glm_task_001_benchmark_check.json +++ /dev/null @@ -1,401 +0,0 @@ -{ - "benchmark_info": { - "library_dir": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks", - "top_n": 3, - "roles": [ - "kick", - "snare", - "hat", - "bass_loop", - "vocal_loop", - "top_loop" - ], - "timestamp": "2026-03-20T16:36:16", - "device": "directml" - }, - "references": [ - { - "file_name": "Mr. Pauer, Goyo - Química (Video Oficial).mp3", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\sample\\Mr. 
Pauer, Goyo - Química (Video Oficial).mp3", - "analysis_time_seconds": 3.09, - "reference_info": { - "tempo": 123.047, - "key": "Cm", - "duration": 145.31, - "rms_mean": 0.17201, - "onset_mean": 1.956218, - "spectral_centroid": 2465.478 - }, - "sections": [ - { - "kind": "verse", - "start": 0.0, - "end": 14.954, - "bars": 8 - }, - { - "kind": "build", - "start": 14.954, - "end": 37.779, - "bars": 12 - }, - { - "kind": "verse", - "start": 37.779, - "end": 46.811, - "bars": 5 - }, - { - "kind": "verse", - "start": 46.811, - "end": 54.822, - "bars": 4 - }, - { - "kind": "drop", - "start": 54.822, - "end": 62.833, - "bars": 4 - }, - { - "kind": "build", - "start": 62.833, - "end": 70.844, - "bars": 4 - }, - { - "kind": "verse", - "start": 70.844, - "end": 92.415, - "bars": 11 - }, - { - "kind": "build", - "start": 92.415, - "end": 101.03, - "bars": 4 - }, - { - "kind": "verse", - "start": 101.03, - "end": 109.041, - "bars": 4 - }, - { - "kind": "build", - "start": 109.041, - "end": 117.098, - "bars": 4 - }, - { - "kind": "outro", - "start": 117.098, - "end": 125.109, - "bars": 4 - }, - { - "kind": "outro", - "start": 125.109, - "end": 133.422, - "bars": 4 - }, - { - "kind": "outro", - "start": 133.422, - "end": 141.433, - "bars": 4 - } - ], - "role_candidates": { - "kick": { - "total_available": 16, - "top_candidates": [ - { - "rank": 1, - "file_name": "BBH - Primer Impacto - Kick 5.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 5.wav", - "score": 0.658173, - "cosine": 0.677478, - "segment_score": 0.807539, - "catalog_score": 0.540981, - "tempo": 117.454, - "key": "Gm", - "duration": 0.5 - }, - { - "rank": 2, - "file_name": "BBH - Primer Impacto - Kick 1.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 1.wav", - "score": 0.650067, - "cosine": 0.633787, - "segment_score": 0.771427, - 
"catalog_score": 0.540981, - "tempo": 117.454, - "key": "Am", - "duration": 0.5 - }, - { - "rank": 3, - "file_name": "BBH - Primer Impacto - Kick 8.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Kick 8.wav", - "score": 0.642297, - "cosine": 0.689128, - "segment_score": 0.809562, - "catalog_score": 0.5, - "tempo": 258.398, - "key": "Fm", - "duration": 0.484 - } - ] - }, - "snare": { - "total_available": 28, - "top_candidates": [ - { - "rank": 1, - "file_name": "MT Clap & Snare Hit 05.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 05.wav", - "score": 0.642515, - "cosine": 0.742869, - "segment_score": 0.87862, - "catalog_score": 0.529168, - "tempo": 258.398, - "key": "Dm", - "duration": 0.72 - }, - { - "rank": 2, - "file_name": "MT Clap & Snare Hit 15.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Clap & Snare Hit 15.wav", - "score": 0.623005, - "cosine": 0.754711, - "segment_score": 0.800798, - "catalog_score": 0.518602, - "tempo": 234.908, - "key": "Dm", - "duration": 0.642 - }, - { - "rank": 3, - "file_name": "BBH - Primer Impacto - Clap 1.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Clap 1.wav", - "score": 0.621014, - "cosine": 0.780775, - "segment_score": 0.805699, - "catalog_score": 0.528549, - "tempo": 117.454, - "key": "A#m", - "duration": 0.545 - } - ] - }, - "hat": { - "total_available": 32, - "top_candidates": [ - { - "rank": 1, - "file_name": "BBH - Primer Impacto - Open Hat 2.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 2.wav", - "score": 0.602448, - "cosine": 0.750913, - "segment_score": 0.789455, - "catalog_score": 0.539635, - 
"tempo": 258.398, - "key": "Cm", - "duration": 0.625 - }, - { - "rank": 2, - "file_name": "BBH - Primer Impacto - Open Hat 9.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Open Hat 9.wav", - "score": 0.592739, - "cosine": 0.764186, - "segment_score": 0.682635, - "catalog_score": 0.5, - "tempo": 258.398, - "key": "Gm", - "duration": 0.38 - }, - { - "rank": 3, - "file_name": "MT Hat Hit 04.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Hat Hit 04.wav", - "score": 0.55811, - "cosine": 0.747485, - "segment_score": 0.747228, - "catalog_score": 0.5, - "tempo": 135.999, - "key": "G", - "duration": 0.233 - } - ] - }, - "bass_loop": { - "total_available": 37, - "top_candidates": [ - { - "rank": 1, - "file_name": "Bass_Loop_03_G#m_125.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Bass_Loop_03_G#m_125.wav", - "score": 0.877488, - "cosine": 0.803278, - "segment_score": 0.883592, - "catalog_score": 0.617711, - "tempo": 123.047, - "key": "Cm", - "duration": 7.68 - }, - { - "rank": 2, - "file_name": "BBH - Primer Impacto - Bass Loop 06 Dmin.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\BBH - Primer Impacto - Bass Loop 06 Dmin.wav", - "score": 0.82587, - "cosine": 0.698374, - "segment_score": 0.799662, - "catalog_score": 0.890835, - "tempo": 123.047, - "key": "Dm", - "duration": 3.84 - }, - { - "rank": 3, - "file_name": "Bass_Loop_05_Cm_125.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Bass_Loop_05_Cm_125.wav", - "score": 0.818811, - "cosine": 0.695605, - "segment_score": 0.883218, - "catalog_score": 0.617711, - "tempo": 63.024, - "key": "C", - "duration": 7.68 - } - ] - }, - "vocal_loop": { - "total_available": 24, - 
"top_candidates": [ - { - "rank": 1, - "file_name": "MT Vocal Loop 12 125.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Vocal Loop 12 125.wav", - "score": 0.932334, - "cosine": 0.827361, - "segment_score": 0.923902, - "catalog_score": 0.999437, - "tempo": 123.047, - "key": "D#", - "duration": 1.92 - }, - { - "rank": 2, - "file_name": "MT Vocal Loop 11 125.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Vocal Loop 11 125.wav", - "score": 0.921701, - "cosine": 0.832834, - "segment_score": 0.920162, - "catalog_score": 0.948909, - "tempo": 123.047, - "key": "D#m", - "duration": 1.92 - }, - { - "rank": 3, - "file_name": "MT Vocal Loop 02 128.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\MT Vocal Loop 02 128.wav", - "score": 0.862394, - "cosine": 0.845787, - "segment_score": 0.954025, - "catalog_score": 0.882953, - "tempo": 123.047, - "key": "G#m", - "duration": 3.75 - } - ] - }, - "top_loop": { - "total_available": 144, - "top_candidates": [ - { - "rank": 1, - "file_name": "Top_Loop_11_Any_125.wav", - "path": "C:\\ProgramData\\Ableton\\Live 12 Suite\\Resources\\MIDI Remote Scripts\\librerias\\all_tracks\\Top_Loop_11_Any_125.wav", - "score": 0.906089, - "cosine": 0.752537, - "segment_score": 0.768995, - "catalog_score": 0.859437, - "tempo": 123.047, - "key": "Cm", - "duration": 7.68 - }, - { - "rank": 2, - "file_name": "drum_loop_21_am_125.wav", - "path": "c:\\programdata\\ableton\\live 12 suite\\resources\\midi remote scripts\\librerias\\all_tracks\\drum_loop_21_am_125.wav", - "score": 0.893566, - "cosine": 0.813975, - "segment_score": 0.954219, - "catalog_score": 0.799711, - "tempo": 123.047, - "key": "A#m", - "duration": 7.68 - }, - { - "rank": 3, - "file_name": "drum_loop_23_am_125.wav", - "path": "c:\\programdata\\ableton\\live 12 suite\\resources\\midi remote 
scripts\\librerias\\all_tracks\\drum_loop_23_am_125.wav", - "score": 0.887869, - "cosine": 0.822104, - "segment_score": 0.94301, - "catalog_score": 0.799711, - "tempo": 123.047, - "key": "A#m", - "duration": 7.68 - } - ] - } - } - } - ], - "contamination_analysis": { - "cross_role_files": [], - "potential_mismatches": [], - "role_score_stats": { - "kick": { - "min": 0.6423, - "max": 0.6582, - "avg": 0.6502, - "count": 3 - }, - "snare": { - "min": 0.621, - "max": 0.6425, - "avg": 0.6288, - "count": 3 - }, - "hat": { - "min": 0.5581, - "max": 0.6024, - "avg": 0.5844, - "count": 3 - }, - "bass_loop": { - "min": 0.8188, - "max": 0.8775, - "avg": 0.8407, - "count": 3 - }, - "vocal_loop": { - "min": 0.8624, - "max": 0.9323, - "avg": 0.9055, - "count": 3 - }, - "top_loop": { - "min": 0.8879, - "max": 0.9061, - "avg": 0.8958, - "count": 3 - } - } - } -} \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_codex_loop.ps1 b/AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_codex_loop.ps1 deleted file mode 100644 index c3c0610..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_codex_loop.ps1 +++ /dev/null @@ -1,157 +0,0 @@ -param( - [Parameter(Mandatory = $true)] - [string]$TaskFile, - - [Parameter(Mandatory = $true)] - [string]$ReportFile, - - [string]$ProjectRoot = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path, - [string]$GlmModel = "glm-5", - [string]$GlmBaseUrl = $(if ($env:ANTHROPIC_BASE_URL) { $env:ANTHROPIC_BASE_URL } else { "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic" }), - [string]$GlmAuthToken = $env:ANTHROPIC_AUTH_TOKEN, - [string]$GlmAgentsFile = "", - [string]$CodexModel = "", - [string]$TelegramBotToken = $env:TELEGRAM_BOT_TOKEN, - [string]$TelegramChatId = $env:TELEGRAM_CHAT_ID, - [string]$TelegramConfigPath = (Join-Path $PSScriptRoot "telegram.local.json"), - [switch]$SkipCodexReview -) - -$ErrorActionPreference = "Stop" - -function Require-Command([string]$Name) { - if (-not 
(Get-Command $Name -ErrorAction SilentlyContinue)) { - throw "Command not found: $Name" - } -} - -function Resolve-RepoPath([string]$BasePath, [string]$TargetPath) { - if ([System.IO.Path]::IsPathRooted($TargetPath)) { - return [System.IO.Path]::GetFullPath($TargetPath) - } - return [System.IO.Path]::GetFullPath((Join-Path $BasePath $TargetPath)) -} - -function Resolve-TelegramSettings() { - if (([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) -and (Test-Path -LiteralPath $TelegramConfigPath)) { - $config = Get-Content -LiteralPath $TelegramConfigPath -Raw | ConvertFrom-Json - if ([string]::IsNullOrWhiteSpace($TelegramBotToken)) { - $script:TelegramBotToken = $config.bot_token - } - if ([string]::IsNullOrWhiteSpace($TelegramChatId)) { - $script:TelegramChatId = $config.chat_id - } - } -} - -function Send-LoopNotification([string]$Message) { - Resolve-TelegramSettings - if ([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) { - return - } - - $notifier = Join-Path $PSScriptRoot "send_telegram_notification.ps1" - try { - & $notifier -Message $Message -BotToken $TelegramBotToken -ChatId $TelegramChatId -ConfigPath $TelegramConfigPath - } - catch { - Write-Warning ("Telegram notification failed: " + $_.Exception.Message) - } -} - -function Resolve-CodexCommand() { - $cmd = Get-Command "codex.cmd" -ErrorAction SilentlyContinue - if ($cmd) { - return $cmd.Source - } - - $fallback = Get-Command "codex" -ErrorAction SilentlyContinue - if ($fallback) { - return $fallback.Source - } - - throw "Command not found: codex" -} - -$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path -$taskPath = (Resolve-Path -LiteralPath $TaskFile).Path -$reportPath = Resolve-RepoPath $projectPath $ReportFile -$codexCommand = Resolve-CodexCommand - -$timestamp = Get-Date -Format "yyyyMMdd_HHmmss" -$runDir = Join-Path $projectPath ("automation\\runs\\loop_" + $timestamp) -New-Item -ItemType 
Directory -Force -Path $runDir | Out-Null -$codexStdoutPath = Join-Path $runDir "codex_stdout.txt" -$codexMessagePath = Join-Path $runDir "codex_last_message.txt" - -$glmRunner = Join-Path $PSScriptRoot "run_glm_cycle.ps1" -Send-LoopNotification("GLM/Codex loop started: $(Split-Path -Leaf $taskPath)") -& $glmRunner ` - -TaskFile $taskPath ` - -ReportFile $reportPath ` - -ProjectRoot $projectPath ` - -Model $GlmModel ` - -BaseUrl $GlmBaseUrl ` - -AuthToken $GlmAuthToken ` - -AgentsFile $GlmAgentsFile ` - -TelegramBotToken $TelegramBotToken ` - -TelegramChatId $TelegramChatId ` - -TelegramConfigPath $TelegramConfigPath - -if ($SkipCodexReview) { - Send-LoopNotification("GLM/Codex loop finished without Codex review: $(Split-Path -Leaf $taskPath)") - Write-Host "GLM worker finished. Codex review skipped by flag." - return -} - -$reviewPrompt = @" -Read this worker task file: -$taskPath - -Read this GLM report: -$reportPath - -Your job: -1. Inspect the real diff in the repository. -2. Verify whether GLM actually implemented what the report claims. -3. Fix anything incorrect, incomplete, or unsafe. -4. Run the relevant validations mentioned by the task/report. -5. Leave the repository in the best corrected state you can reach in one pass. -6. Write a concise final summary to the output file configured by the CLI. - -Be strict about overclaims. The code is the source of truth, not the report. -"@ - -$codexArgs = @( - "exec", - "--dangerously-bypass-approvals-and-sandbox", - "-C", $projectPath, - "-o", $codexMessagePath -) - -if (-not [string]::IsNullOrWhiteSpace($CodexModel)) { - $codexArgs += @("-m", $CodexModel) -} - -$codexArgs += $reviewPrompt - -Write-Host "" -Write-Host "Running Codex review/correction pass..." 
-Send-LoopNotification("Codex review started: $(Split-Path -Leaf $taskPath)") - -try { - & $codexCommand @codexArgs 2>&1 | Tee-Object -FilePath $codexStdoutPath -} -catch { - Send-LoopNotification("Codex review failed: $(Split-Path -Leaf $taskPath)`n$($_.Exception.Message)") - throw -} - -Send-LoopNotification("GLM/Codex loop finished: $(Split-Path -Leaf $taskPath)`nReport: $(Split-Path -Leaf $reportPath)`nCodex note: $(Split-Path -Leaf $codexMessagePath)") - -Write-Host "" -Write-Host "Loop finished." -Write-Host "Task: $taskPath" -Write-Host "GLM report: $reportPath" -Write-Host "Codex note: $codexMessagePath" -Write-Host "Codex stdout:$codexStdoutPath" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_cycle.ps1 b/AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_cycle.ps1 deleted file mode 100644 index 928f644..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/run_glm_cycle.ps1 +++ /dev/null @@ -1,162 +0,0 @@ -param( - [Parameter(Mandatory = $true)] - [string]$TaskFile, - - [Parameter(Mandatory = $true)] - [string]$ReportFile, - - [string]$ProjectRoot = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path, - [string]$Model = "glm-5", - [string]$BaseUrl = $(if ($env:ANTHROPIC_BASE_URL) { $env:ANTHROPIC_BASE_URL } else { "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic" }), - [string]$AuthToken = $env:ANTHROPIC_AUTH_TOKEN, - [string]$AgentsFile = (Join-Path $PSScriptRoot "glm_agents.team.json"), - [string]$TelegramBotToken = $env:TELEGRAM_BOT_TOKEN, - [string]$TelegramChatId = $env:TELEGRAM_CHAT_ID, - [string]$TelegramConfigPath = (Join-Path $PSScriptRoot "telegram.local.json"), - [switch]$VerboseLogs -) - -$ErrorActionPreference = "Stop" - -function Require-Command([string]$Name) { - if (-not (Get-Command $Name -ErrorAction SilentlyContinue)) { - throw "Command not found: $Name" - } -} - -function Require-File([string]$PathValue, [string]$Label) { - if (-not (Test-Path -LiteralPath $PathValue)) { - throw "$Label not found: 
$PathValue" - } -} - -function Resolve-RepoPath([string]$BasePath, [string]$TargetPath) { - if ([System.IO.Path]::IsPathRooted($TargetPath)) { - return [System.IO.Path]::GetFullPath($TargetPath) - } - return [System.IO.Path]::GetFullPath((Join-Path $BasePath $TargetPath)) -} - -function Resolve-TelegramSettings() { - if (([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) -and (Test-Path -LiteralPath $TelegramConfigPath)) { - $config = Get-Content -LiteralPath $TelegramConfigPath -Raw | ConvertFrom-Json - if ([string]::IsNullOrWhiteSpace($TelegramBotToken)) { - $script:TelegramBotToken = $config.bot_token - } - if ([string]::IsNullOrWhiteSpace($TelegramChatId)) { - $script:TelegramChatId = $config.chat_id - } - } -} - -function Send-RunNotification([string]$Message) { - Resolve-TelegramSettings - if ([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) { - return - } - - $notifier = Join-Path $PSScriptRoot "send_telegram_notification.ps1" - try { - & $notifier -Message $Message -BotToken $TelegramBotToken -ChatId $TelegramChatId -ConfigPath $TelegramConfigPath - } - catch { - Write-Warning ("Telegram notification failed: " + $_.Exception.Message) - } -} - -Require-Command "claude" -Require-File $TaskFile "Task file" - -if ([string]::IsNullOrWhiteSpace($BaseUrl)) { - throw "ANTHROPIC_BASE_URL is not set. Pass -BaseUrl or export the env var first." -} -if ([string]::IsNullOrWhiteSpace($AuthToken)) { - throw "ANTHROPIC_AUTH_TOKEN is not set. Pass -AuthToken or export the env var first." 
-} - -$env:ANTHROPIC_BASE_URL = $BaseUrl -$env:ANTHROPIC_AUTH_TOKEN = $AuthToken -$env:CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC = "1" -$env:ANTHROPIC_MODEL = $Model -$env:ANTHROPIC_SMALL_FAST_MODEL = $Model -$env:ANTHROPIC_DEFAULT_HAIKU_MODEL = $Model -$env:ANTHROPIC_DEFAULT_SONNET_MODEL = $Model -$env:ANTHROPIC_DEFAULT_OPUS_MODEL = $Model - -$taskPath = (Resolve-Path -LiteralPath $TaskFile).Path -$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path -$reportPath = Resolve-RepoPath $projectPath $ReportFile -$reportDir = Split-Path -Parent $reportPath -New-Item -ItemType Directory -Force -Path $reportDir | Out-Null - -$timestamp = Get-Date -Format "yyyyMMdd_HHmmss" -$runDir = Join-Path $projectPath ("automation\\runs\\glm_" + $timestamp) -New-Item -ItemType Directory -Force -Path $runDir | Out-Null -$stdoutPath = Join-Path $runDir "glm_stdout.txt" - -$prompt = @" -You are running as the GLM worker on this Windows repository. - -Repository root: -$projectPath - -Task file to follow exactly: -$taskPath - -You must: -1. Read the task markdown and implement the requested changes in the repository. -2. Run the validations requested by the task. -3. Create or overwrite this report file with a truthful report: -$reportPath -4. Do not overclaim. If something is incomplete, say so explicitly in the report. -5. Keep the diff focused. -6. If custom agents are available, use them aggressively and in parallel where safe: - - planner first - - implementer_core and implementer_aux for disjoint work - - validator before finishing - - retrieval_reviewer or runtime_guard when relevant - - reporter last - -Open and follow the task markdown from disk instead of asking for the task again. 
-"@ - -$claudeArgs = @( - "-p", - "--dangerously-skip-permissions", - "--effort", "max", - "--model", $Model, - "--add-dir", $projectPath -) - -if (-not [string]::IsNullOrWhiteSpace($AgentsFile)) { - $agentsPath = (Resolve-Path -LiteralPath $AgentsFile).Path - $claudeArgs += @("--agents", (Get-Content -LiteralPath $agentsPath -Raw)) -} - -if ($VerboseLogs) { - $claudeArgs += "--verbose" -} - -Write-Host "Running GLM worker with model $Model..." -Send-RunNotification("GLM worker started: $(Split-Path -Leaf $taskPath)") - -try { - $prompt | & claude @claudeArgs 2>&1 | Tee-Object -FilePath $stdoutPath -} -catch { - Send-RunNotification("GLM worker failed: $(Split-Path -Leaf $taskPath)`n$($_.Exception.Message)") - throw -} - -if (-not (Test-Path -LiteralPath $reportPath)) { - Send-RunNotification("GLM worker failed: missing report for $(Split-Path -Leaf $taskPath)") - throw "GLM finished but did not create the expected report file: $reportPath" -} - -Send-RunNotification("GLM worker finished: $(Split-Path -Leaf $taskPath)`nReport: $(Split-Path -Leaf $reportPath)") - -Write-Host "" -Write-Host "GLM cycle finished." 
-Write-Host "Task: $taskPath" -Write-Host "Report: $reportPath" -Write-Host "Stdout: $stdoutPath" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/run_task_queue.ps1 b/AbletonMCP_AI_BAK_20260328_200801/automation/run_task_queue.ps1 deleted file mode 100644 index 0f52379..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/run_task_queue.ps1 +++ /dev/null @@ -1,141 +0,0 @@ -param( - [string]$QueueFile = (Join-Path $PSScriptRoot "task_queue.json"), - [string]$ProjectRoot = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path, - [string]$GlmModel = "glm-5", - [string]$GlmBaseUrl = $(if ($env:ANTHROPIC_BASE_URL) { $env:ANTHROPIC_BASE_URL } else { "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic" }), - [string]$GlmAuthToken = $env:ANTHROPIC_AUTH_TOKEN, - [string]$GlmAgentsFile = (Join-Path $PSScriptRoot "glm_agents.team.json"), - [string]$CodexModel = "", - [string]$TelegramBotToken = $env:TELEGRAM_BOT_TOKEN, - [string]$TelegramChatId = $env:TELEGRAM_CHAT_ID, - [string]$TelegramConfigPath = (Join-Path $PSScriptRoot "telegram.local.json"), - [int]$PollSeconds = 30, - [switch]$Watch, - [switch]$ContinueOnError -) - -$ErrorActionPreference = "Stop" - -function Resolve-RepoPath([string]$BasePath, [string]$TargetPath) { - if ([System.IO.Path]::IsPathRooted($TargetPath)) { - return [System.IO.Path]::GetFullPath($TargetPath) - } - return [System.IO.Path]::GetFullPath((Join-Path $BasePath $TargetPath)) -} - -function Load-Queue([string]$PathValue) { - return Get-Content -LiteralPath $PathValue -Raw | ConvertFrom-Json -Depth 20 -} - -function Save-Queue([string]$PathValue, $QueueObject) { - $QueueObject | ConvertTo-Json -Depth 20 | Set-Content -LiteralPath $PathValue -Encoding UTF8 -} - -function Resolve-TelegramSettings() { - if (([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) -and (Test-Path -LiteralPath $TelegramConfigPath)) { - $config = Get-Content -LiteralPath $TelegramConfigPath -Raw | 
ConvertFrom-Json - if ([string]::IsNullOrWhiteSpace($TelegramBotToken)) { - $script:TelegramBotToken = $config.bot_token - } - if ([string]::IsNullOrWhiteSpace($TelegramChatId)) { - $script:TelegramChatId = $config.chat_id - } - } -} - -function Send-QueueNotification([string]$Message) { - Resolve-TelegramSettings - if ([string]::IsNullOrWhiteSpace($TelegramBotToken) -or [string]::IsNullOrWhiteSpace($TelegramChatId)) { - return - } - - $notifier = Join-Path $PSScriptRoot "send_telegram_notification.ps1" - try { - & $notifier -Message $Message -BotToken $TelegramBotToken -ChatId $TelegramChatId -ConfigPath $TelegramConfigPath - } - catch { - Write-Warning ("Telegram notification failed: " + $_.Exception.Message) - } -} - -function Find-NextTask($QueueObject) { - foreach ($task in $QueueObject.tasks) { - if ($task.enabled -and $task.status -eq "pending") { - return $task - } - } - return $null -} - -$projectPath = (Resolve-Path -LiteralPath $ProjectRoot).Path -$queuePath = Resolve-RepoPath $projectPath $QueueFile -$loopRunner = Join-Path $PSScriptRoot "run_glm_codex_loop.ps1" -$historyDir = Join-Path $projectPath "automation\\runs\\queue" -New-Item -ItemType Directory -Force -Path $historyDir | Out-Null - -Send-QueueNotification("AbletonMCP_AI queue runner started on $(Get-Date -Format 'yyyy-MM-dd HH:mm:ss'). 
Watching=$Watch ContinueOnError=$ContinueOnError") - -do { - $queue = Load-Queue $queuePath - $task = Find-NextTask $queue - - if ($null -eq $task) { - if ($Watch) { - Start-Sleep -Seconds $PollSeconds - continue - } - break - } - - $taskPath = Resolve-RepoPath $projectPath $task.task_file - $reportPath = Resolve-RepoPath $projectPath $task.report_file - - $task.status = "running" - $task.started_at = (Get-Date).ToString("s") - Save-Queue $queuePath $queue - Send-QueueNotification("Queue task started: [$($task.id)] $($task.title)") - - try { - & $loopRunner ` - -TaskFile $taskPath ` - -ReportFile $reportPath ` - -ProjectRoot $projectPath ` - -GlmModel $GlmModel ` - -GlmBaseUrl $GlmBaseUrl ` - -GlmAuthToken $GlmAuthToken ` - -GlmAgentsFile $GlmAgentsFile ` - -CodexModel $CodexModel ` - -TelegramBotToken $TelegramBotToken ` - -TelegramChatId $TelegramChatId ` - -TelegramConfigPath $TelegramConfigPath - - $queue = Load-Queue $queuePath - foreach ($item in $queue.tasks) { - if ($item.id -eq $task.id) { - $item.status = "completed" - $item.completed_at = (Get-Date).ToString("s") - break - } - } - Save-Queue $queuePath $queue - Send-QueueNotification("Queue task completed: [$($task.id)] $($task.title)") - } - catch { - $queue = Load-Queue $queuePath - foreach ($item in $queue.tasks) { - if ($item.id -eq $task.id) { - $item.status = "failed" - $item.failed_at = (Get-Date).ToString("s") - $item.error = $_.Exception.Message - break - } - } - Save-Queue $queuePath $queue - Send-QueueNotification("Queue task failed: [$($task.id)] $($task.title)`n$($_.Exception.Message)") - - if (-not $ContinueOnError) { - throw - } - } -} -while ($true) diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/send_telegram_notification.ps1 b/AbletonMCP_AI_BAK_20260328_200801/automation/send_telegram_notification.ps1 deleted file mode 100644 index 162458b..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/send_telegram_notification.ps1 +++ /dev/null @@ -1,33 +0,0 @@ -param( - 
[Parameter(Mandatory = $true)] - [string]$Message, - - [string]$BotToken = $env:TELEGRAM_BOT_TOKEN, - [string]$ChatId = $env:TELEGRAM_CHAT_ID, - [string]$ConfigPath = (Join-Path $PSScriptRoot "telegram.local.json") -) - -$ErrorActionPreference = "Stop" - -if (([string]::IsNullOrWhiteSpace($BotToken) -or [string]::IsNullOrWhiteSpace($ChatId)) -and (Test-Path -LiteralPath $ConfigPath)) { - $config = Get-Content -LiteralPath $ConfigPath -Raw | ConvertFrom-Json - if ([string]::IsNullOrWhiteSpace($BotToken)) { - $BotToken = $config.bot_token - } - if ([string]::IsNullOrWhiteSpace($ChatId)) { - $ChatId = $config.chat_id - } -} - -if ([string]::IsNullOrWhiteSpace($BotToken) -or [string]::IsNullOrWhiteSpace($ChatId)) { - exit 0 -} - -$uri = "https://api.telegram.org/bot$BotToken/sendMessage" -$body = @{ - chat_id = $ChatId - text = $Message - disable_web_page_preview = $true -} - -Invoke-RestMethod -Uri $uri -Method Post -Body $body | Out-Null diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/ableton-glm-loop.service b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/ableton-glm-loop.service deleted file mode 100644 index 9629ddc..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/ableton-glm-loop.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=AbletonMCP_AI autonomous GLM/Codex queue -After=network-online.target -Wants=network-online.target - -[Service] -Type=simple -User=ren -WorkingDirectory=/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI -Environment=LOCAL_ENV_FILE=/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl.local.env -ExecStart=/bin/bash /mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl/run_task_queue.sh -Restart=always -RestartSec=15 -StandardOutput=append:/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl_runtime/logs/service.log 
-StandardError=append:/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI/automation/wsl_runtime/logs/service.log - -[Install] -WantedBy=multi-user.target diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/bootstrap_wsl_runtime.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/bootstrap_wsl_runtime.sh deleted file mode 100644 index 30d5b9c..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/bootstrap_wsl_runtime.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -RUNTIME_DIR="$PROJECT_ROOT/automation/wsl_runtime" -CODEX_HOME_DIR="$RUNTIME_DIR/codex_home" -WINDOWS_CODEX_HOME="/mnt/c/Users/ren/.codex" -ENV_FILE="$PROJECT_ROOT/automation/wsl.local.env" -OPENAI_API_KEY_VALUE="" - -mkdir -p "$CODEX_HOME_DIR" "$RUNTIME_DIR/logs" - -if [[ -f "$WINDOWS_CODEX_HOME/auth.json" && ! -f "$CODEX_HOME_DIR/auth.json" ]]; then - cp "$WINDOWS_CODEX_HOME/auth.json" "$CODEX_HOME_DIR/auth.json" -fi - -if [[ -f "$CODEX_HOME_DIR/auth.json" ]]; then - OPENAI_API_KEY_VALUE="$(jq -r '.OPENAI_API_KEY // empty' "$CODEX_HOME_DIR/auth.json" 2>/dev/null || true)" -fi - -cat > "$CODEX_HOME_DIR/config.toml" <<'EOF' -model = "gpt-5.4" - -[sandbox_workspace_write] -network_access = true -EOF - -cat > "$ENV_FILE" <> "$ENV_FILE" -fi - -chmod 600 "$ENV_FILE" "$CODEX_HOME_DIR/auth.json" 2>/dev/null || true -chmod +x "$SCRIPT_DIR/"*.sh - -echo "WSL runtime bootstrapped" -echo "Runtime dir: $RUNTIME_DIR" -echo "Env file: $ENV_FILE" -echo "Codex home: $CODEX_HOME_DIR" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/docker-compose.yml b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/docker-compose.yml deleted file mode 100644 index 79eca51..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/docker-compose.yml +++ /dev/null @@ -1,163 +0,0 @@ -services: - postgres: - image: 
postgres:16-alpine - container_name: abletonmcp-postgres - restart: unless-stopped - environment: - POSTGRES_USER: ${POSTGRES_USER:-postgres} - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme} - POSTGRES_DB: ${POSTGRES_BOOTSTRAP_DB:-postgres} - PGDATA: /var/lib/postgresql/data/pgdata - GITEA_DB_NAME: ${GITEA_DB_NAME:-gitea} - N8N_DB_NAME: ${N8N_DB_NAME:-n8n} - volumes: - - postgres-data:/var/lib/postgresql/data - - ./initdb:/docker-entrypoint-initdb.d:ro - ports: - - "${POSTGRES_PORT:-5432}:5432" - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_BOOTSTRAP_DB:-postgres}"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 20s - networks: - - internal - - redis: - image: redis:7-alpine - container_name: abletonmcp-redis - restart: unless-stopped - command: - - redis-server - - --requirepass - - ${REDIS_PASSWORD:-changeme} - - --appendonly - - "yes" - - --save - - "60" - - "1000" - volumes: - - redis-data:/data - ports: - - "${REDIS_PORT:-6379}:6379" - healthcheck: - test: ["CMD-SHELL", "redis-cli -a ${REDIS_PASSWORD:-changeme} ping | grep -q PONG"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - networks: - - internal - - gitea: - image: gitea/gitea:1.21-rootless - container_name: abletonmcp-gitea - restart: unless-stopped - environment: - USER_UID: 1000 - USER_GID: 1000 - GITEA__database__DB_TYPE: postgres - GITEA__database__HOST: postgres:5432 - GITEA__database__NAME: ${GITEA_DB_NAME:-gitea} - GITEA__database__USER: ${POSTGRES_USER:-postgres} - GITEA__database__PASSWD: ${POSTGRES_PASSWORD:-changeme} - GITEA__server__DOMAIN: ${GITEA_DOMAIN:-localhost} - GITEA__server__ROOT_URL: ${GITEA_ROOT_URL:-http://localhost:3000} - GITEA__server__HTTP_PORT: 3000 - GITEA__server__SSH_DOMAIN: ${GITEA_SSH_DOMAIN:-localhost} - GITEA__server__SSH_PORT: ${GITEA_SSH_PORT:-222} - GITEA__server__START_SSH_SERVER: "true" - GITEA__server__SSH_LISTEN_PORT: 222 - GITEA__security__INSTALL_LOCK: 
${GITEA_SECURITY_INSTALL_LOCK:-true} - GITEA__service__DISABLE_REGISTRATION: "true" - GITEA__server__OFFLINE_MODE: ${GITEA_OFFLINE_MODE:-true} - volumes: - - gitea-data:/var/lib/gitea - - gitea-config:/etc/gitea - - gitea-logs:/var/log/gitea - ports: - - "${GITEA_HTTP_PORT:-3000}:3000" - - "${GITEA_SSH_PORT:-222}:222" - healthcheck: - test: ["CMD-SHELL", "wget -q --spider http://localhost:3000/api/healthz || exit 1"] - interval: 15s - timeout: 5s - retries: 10 - start_period: 45s - depends_on: - postgres: - condition: service_healthy - networks: - - internal - - n8n: - image: n8nio/n8n:latest - container_name: abletonmcp-n8n - restart: unless-stopped - environment: - DB_TYPE: postgresdb - DB_POSTGRESDB_HOST: postgres - DB_POSTGRESDB_PORT: 5432 - DB_POSTGRESDB_DATABASE: ${N8N_DB_NAME:-n8n} - DB_POSTGRESDB_USER: ${POSTGRES_USER:-postgres} - DB_POSTGRESDB_PASSWORD: ${POSTGRES_PASSWORD:-changeme} - N8N_PORT: 5678 - N8N_PROTOCOL: http - N8N_HOST: ${N8N_HOST:-localhost} - N8N_PATH: ${N8N_PATH:-/} - N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY:-changeme-change-this} - N8N_LOG_LEVEL: ${N8N_LOG_LEVEL:-info} - N8N_EXECUTIONS_MODE: ${N8N_EXECUTIONS_MODE:-regular} - N8N_BASIC_AUTH_ACTIVE: ${N8N_BASIC_AUTH_ACTIVE:-true} - N8N_BASIC_AUTH_USER: ${N8N_BASIC_AUTH_USER:-admin} - N8N_BASIC_AUTH_PASSWORD: ${N8N_BASIC_AUTH_PASSWORD:-changeme} - N8N_COOKIE_POLICY: ${N8N_COOKIE_POLICY:-lax} - N8N_HOST_ALLOW_LIST: ${N8N_HOST_ALLOW_LIST:-localhost,127.0.0.1} - N8N_WEBHOOK_URL: ${N8N_WEBHOOK_URL:-http://localhost:5678/} - N8N_EDITOR_BASE_URL: ${N8N_EDITOR_BASE_URL:-http://localhost:5678} - GENERIC_TIMEZONE: ${TZ:-UTC} - TZ: ${TZ:-UTC} - N8N_DIAGNOSTICS_ENABLED: ${N8N_DIAGNOSTICS_ENABLED:-false} - N8N_VERSION_NOTIFICATIONS_ENABLED: ${N8N_VERSION_NOTIFICATIONS_ENABLED:-false} - volumes: - - n8n-data:/home/node/.n8n - - n8n-logs:/home/node/.npm/_logs - - ${PROJECT_PATH:-/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI}:/project:rw - - 
${PROJECT_PATH:-/mnt/c/ProgramData/Ableton/Live 12 Suite/Resources/MIDI Remote Scripts/AbletonMCP_AI}/automation/workflows:/workflows:ro - ports: - - "${N8N_PORT:-5678}:5678" - healthcheck: - test: ["CMD-SHELL", "wget -q --spider http://localhost:5678/healthz || exit 1"] - interval: 15s - timeout: 5s - retries: 10 - start_period: 45s - depends_on: - postgres: - condition: service_healthy - redis: - condition: service_healthy - networks: - - internal - -networks: - internal: - name: abletonmcp-network - driver: bridge - -volumes: - postgres-data: - name: abletonmcp-postgres-data - gitea-data: - name: abletonmcp-gitea-data - gitea-config: - name: abletonmcp-gitea-config - gitea-logs: - name: abletonmcp-gitea-logs - redis-data: - name: abletonmcp-redis-data - n8n-data: - name: abletonmcp-n8n-data - n8n-logs: - name: abletonmcp-n8n-logs diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/initdb/01-init-multiple-dbs.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/initdb/01-init-multiple-dbs.sh deleted file mode 100644 index 67f014b..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/initdb/01-init-multiple-dbs.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -set -eu - -create_db() { - db_name="$1" - psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "${POSTGRES_BOOTSTRAP_DB:-postgres}" <<-EOSQL - SELECT 'CREATE DATABASE "${db_name}"' - WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '${db_name}')\gexec -EOSQL -} - -if [ -n "${GITEA_DB_NAME:-}" ]; then - create_db "$GITEA_DB_NAME" -fi - -if [ -n "${N8N_DB_NAME:-}" ]; then - create_db "$N8N_DB_NAME" -fi diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/install_service.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/install_service.sh deleted file mode 100644 index 2630724..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/install_service.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd)" -SERVICE_SRC="$SCRIPT_DIR/ableton-glm-loop.service" -SERVICE_DST="/etc/systemd/system/ableton-glm-loop.service" - -sudo cp "$SERVICE_SRC" "$SERVICE_DST" -sudo systemctl daemon-reload -sudo systemctl enable ableton-glm-loop.service -sudo systemctl restart ableton-glm-loop.service -sudo systemctl status --no-pager ableton-glm-loop.service || true diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_codex_loop.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_codex_loop.sh deleted file mode 100644 index 05e7c2f..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_codex_loop.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}" - -if [[ -f "$LOCAL_ENV_FILE" ]]; then - # shellcheck disable=SC1090 - source "$LOCAL_ENV_FILE" -fi - -TASK_FILE="${1:?task file is required}" -REPORT_FILE="${2:?report file is required}" -GLM_MODEL="${GLM_MODEL:-glm-5}" -CODEX_MODEL="${CODEX_MODEL:-gpt-5.4}" -SKIP_CODEX_REVIEW="${SKIP_CODEX_REVIEW:-0}" -CODEX_HOME="${CODEX_HOME:-$PROJECT_ROOT/automation/wsl_runtime/codex_home}" -export CODEX_HOME -if [[ -n "${OPENAI_API_KEY:-}" ]]; then - export OPENAI_API_KEY -fi - -RUN_DIR="$PROJECT_ROOT/automation/runs/loop_$(date +%Y%m%d_%H%M%S)" -CODEX_STDOUT_PATH="$RUN_DIR/codex_stdout.txt" -CODEX_MESSAGE_PATH="$RUN_DIR/codex_last_message.txt" -mkdir -p "$RUN_DIR" - -notify() { - "$SCRIPT_DIR/send_telegram.sh" "$1" || true -} - -notify "GLM/Codex loop started: $(basename "$TASK_FILE")" -"$SCRIPT_DIR/run_glm_cycle.sh" "$TASK_FILE" "$REPORT_FILE" - -if [[ "$SKIP_CODEX_REVIEW" == "1" ]]; then - notify "GLM/Codex loop finished without Codex review: $(basename "$TASK_FILE")" - exit 0 -fi - -notify "Codex review started: $(basename "$TASK_FILE")" - -WIN_TASK_FILE="$(wslpath -w 
"$TASK_FILE")" -WIN_REPORT_FILE="$(wslpath -w "$REPORT_FILE")" -WIN_PROJECT_ROOT="$(wslpath -w "$PROJECT_ROOT")" -WIN_CODEX_MESSAGE_PATH="$(wslpath -w "$CODEX_MESSAGE_PATH")" -WIN_REVIEW_SCRIPT="$(wslpath -w "$PROJECT_ROOT/automation/invoke_codex_review.ps1")" - -if ! /mnt/c/Windows/System32/WindowsPowerShell/v1.0/powershell.exe -NoProfile -ExecutionPolicy Bypass -File "$WIN_REVIEW_SCRIPT" -TaskFile "$WIN_TASK_FILE" -ReportFile "$WIN_REPORT_FILE" -ProjectRoot "$WIN_PROJECT_ROOT" -OutputFile "$WIN_CODEX_MESSAGE_PATH" -CodexModel "$CODEX_MODEL" 2>&1 | tee "$CODEX_STDOUT_PATH"; then - notify "Codex review failed: $(basename "$TASK_FILE")" - exit 1 -fi - -notify "GLM/Codex loop finished: $(basename "$TASK_FILE")" -echo "Loop finished" -echo "Task: $TASK_FILE" -echo "GLM report: $REPORT_FILE" -echo "Codex note: $CODEX_MESSAGE_PATH" -echo "Codex stdout:$CODEX_STDOUT_PATH" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_cycle.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_cycle.sh deleted file mode 100644 index d5020a4..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_glm_cycle.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" -LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}" - -if [[ -f "$LOCAL_ENV_FILE" ]]; then - # shellcheck disable=SC1090 - source "$LOCAL_ENV_FILE" -fi - -TASK_FILE="${1:?task file is required}" -REPORT_FILE="${2:?report file is required}" -GLM_MODEL="${GLM_MODEL:-glm-5}" -GLM_AGENTS_FILE="${GLM_AGENTS_FILE:-$PROJECT_ROOT/automation/glm_agents.team.json}" - -export ANTHROPIC_BASE_URL="${ANTHROPIC_BASE_URL:-https://coding-intl.dashscope.aliyuncs.com/apps/anthropic}" -export ANTHROPIC_AUTH_TOKEN="${ANTHROPIC_AUTH_TOKEN:?ANTHROPIC_AUTH_TOKEN is required}" -export CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC="1" -export ANTHROPIC_MODEL="$GLM_MODEL" -export ANTHROPIC_SMALL_FAST_MODEL="$GLM_MODEL" -export ANTHROPIC_DEFAULT_HAIKU_MODEL="$GLM_MODEL" -export ANTHROPIC_DEFAULT_SONNET_MODEL="$GLM_MODEL" -export ANTHROPIC_DEFAULT_OPUS_MODEL="$GLM_MODEL" - -RUN_DIR="$PROJECT_ROOT/automation/runs/glm_$(date +%Y%m%d_%H%M%S)" -STDOUT_PATH="$RUN_DIR/glm_stdout.txt" -mkdir -p "$RUN_DIR" "$(dirname "$REPORT_FILE")" - -notify() { - "$SCRIPT_DIR/send_telegram.sh" "$1" || true -} - -PROMPT=$(cat <&1 | tee "$STDOUT_PATH"; then - notify "GLM worker failed: $(basename "$TASK_FILE")" - exit 1 -fi - -if [[ ! -f "$REPORT_FILE" ]]; then - notify "GLM worker failed: missing report for $(basename "$TASK_FILE")" - echo "missing report: $REPORT_FILE" >&2 - exit 1 -fi - -notify "GLM worker finished: $(basename "$TASK_FILE")" -echo "GLM cycle finished" -echo "Task: $TASK_FILE" -echo "Report: $REPORT_FILE" -echo "Stdout: $STDOUT_PATH" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_task_queue.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_task_queue.sh deleted file mode 100644 index aec3377..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/run_task_queue.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" -QUEUE_FILE="${QUEUE_FILE:-$PROJECT_ROOT/automation/task_queue.json}" -LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}" -POLL_SECONDS="${POLL_SECONDS:-30}" -WATCH="${WATCH:-1}" -CONTINUE_ON_ERROR="${CONTINUE_ON_ERROR:-1}" - -if [[ -f "$LOCAL_ENV_FILE" ]]; then - # shellcheck disable=SC1090 - source "$LOCAL_ENV_FILE" -fi - -notify() { - "$SCRIPT_DIR/send_telegram.sh" "$1" || true -} - -queue_has_pending() { - jq -e '.tasks[] | select(.enabled == true and .status == "pending")' "$QUEUE_FILE" >/dev/null -} - -read_next_task() { - jq -r '.tasks[] | select(.enabled == true and .status == "pending") | @base64' "$QUEUE_FILE" | head -n 1 -} - -update_task_status() { - local task_id="$1" - local status="$2" - local field="$3" - local value="$4" - local tmp - local queue_dir - queue_dir="$(dirname "$QUEUE_FILE")" - tmp="$(mktemp "$queue_dir/.task_queue.tmp.XXXXXX")" - jq --arg id "$task_id" --arg status "$status" --arg field "$field" --arg value "$value" ' - .tasks |= map( - if .id == $id then - .status = $status | .[$field] = $value - else - . - end - )' "$QUEUE_FILE" > "$tmp" - mv "$tmp" "$QUEUE_FILE" -} - -set_task_error() { - local task_id="$1" - local message="$2" - local tmp - local queue_dir - queue_dir="$(dirname "$QUEUE_FILE")" - tmp="$(mktemp "$queue_dir/.task_queue.tmp.XXXXXX")" - jq --arg id "$task_id" --arg msg "$message" ' - .tasks |= map( - if .id == $id then - .error = $msg - else - . - end - )' "$QUEUE_FILE" > "$tmp" - mv "$tmp" "$QUEUE_FILE" -} - -notify "AbletonMCP_AI queue runner started on $(date '+%Y-%m-%d %H:%M:%S')" - -while true; do - if ! 
queue_has_pending; then - if [[ "$WATCH" == "1" ]]; then - sleep "$POLL_SECONDS" - continue - fi - break - fi - - task_b64="$(read_next_task)" - if [[ -z "$task_b64" ]]; then - sleep "$POLL_SECONDS" - continue - fi - - task_json="$(printf '%s' "$task_b64" | base64 -d)" - task_id="$(printf '%s' "$task_json" | jq -r '.id')" - task_title="$(printf '%s' "$task_json" | jq -r '.title')" - task_file_rel="$(printf '%s' "$task_json" | jq -r '.task_file')" - report_file_rel="$(printf '%s' "$task_json" | jq -r '.report_file')" - task_file="$PROJECT_ROOT/${task_file_rel//\\//}" - report_file="$PROJECT_ROOT/${report_file_rel//\\//}" - - update_task_status "$task_id" "running" "started_at" "$(date -Iseconds)" - notify "Queue task started: [$task_id] $task_title" - - if "$SCRIPT_DIR/run_glm_codex_loop.sh" "$task_file" "$report_file"; then - update_task_status "$task_id" "completed" "completed_at" "$(date -Iseconds)" - notify "Queue task completed: [$task_id] $task_title" - else - update_task_status "$task_id" "failed" "failed_at" "$(date -Iseconds)" - set_task_error "$task_id" "task runner failed" - notify "Queue task failed: [$task_id] $task_title" - if [[ "$CONTINUE_ON_ERROR" != "1" ]]; then - exit 1 - fi - fi -done diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install.sh deleted file mode 100644 index b7b84bb..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install.sh +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env bash -# -# install.sh - Install Docker, Docker Compose, and local Python runtime on Ubuntu 24.04 WSL2 -# Idempotent: safe to run multiple times -# - -set -euo pipefail - -readonly RED='\033[0;31m' -readonly GREEN='\033[0;32m' -readonly YELLOW='\033[1;33m' -readonly NC='\033[0m' - -log_info() { echo -e "${GREEN}[INFO]${NC} $*"; } -log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } -log_error() { echo -e "${RED}[ERROR]${NC} $*"; } - -SCRIPT_DIR="$(cd 
"$(dirname "${BASH_SOURCE[0]}")" && pwd)" -WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)" -PROJECT_ROOT="$(cd "$AUTOMATION_DIR/.." && pwd)" -RUNTIME_DIR="$AUTOMATION_DIR/wsl_runtime" -VENV_DIR="$RUNTIME_DIR/venv" - -check_sudo() { - if [[ $EUID -eq 0 ]]; then - log_error "This script should not be run as root. It will use sudo when needed." - exit 1 - fi -} - -detect_ubuntu() { - if [[ ! -f /etc/os-release ]]; then - log_error "Cannot detect OS version. /etc/os-release not found." - exit 1 - fi - - # shellcheck disable=SC1091 - source /etc/os-release - if [[ "${ID:-}" != "ubuntu" ]]; then - log_warn "This script is designed for Ubuntu. Detected: ${ID:-unknown}" - fi - - log_info "Detected Ubuntu ${VERSION_ID:-unknown}" -} - -check_wsl2() { - if [[ ! -f /proc/version ]]; then - log_warn "Cannot verify WSL environment" - return - fi - - if grep -qi microsoft /proc/version; then - log_info "Running in WSL environment" - else - log_warn "Not running in WSL. This script is designed for WSL2." - fi -} - -install_docker() { - log_info "Checking Docker installation..." - - if command -v docker >/dev/null 2>&1; then - log_info "Docker already installed: $(docker --version)" - else - log_info "Installing Docker..." - sudo apt-get update -q - sudo apt-get install -y \ - ca-certificates \ - curl \ - gnupg \ - lsb-release \ - software-properties-common - - sudo install -m 0755 -d /etc/apt/keyrings - if [[ ! -f /etc/apt/keyrings/docker.gpg ]]; then - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg - sudo chmod a+r /etc/apt/keyrings/docker.gpg - fi - - local codename - codename=$(. 
/etc/os-release && echo "$VERSION_CODENAME") - sudo tee /etc/apt/sources.list.d/docker.list >/dev/null </dev/null 2>&1; then - log_info "Python already installed: $(python3 --version)" - else - sudo apt-get update -q - sudo apt-get install -y python3 python3-pip python3-venv python3-full - fi -} - -install_utilities() { - log_info "Installing system utilities..." - - sudo apt-get update -q - sudo apt-get install -y \ - jq \ - git \ - curl \ - wget \ - rsync \ - net-tools \ - dnsutils \ - htop \ - ncdu \ - tree \ - unzip \ - zip \ - httpie \ - python3-rich \ - pipx -} - -configure_docker_wsl2() { - log_info "Configuring Docker for WSL..." - - local docker_config_dir="/etc/docker" - local docker_config_file="$docker_config_dir/daemon.json" - - if [[ ! -f "$docker_config_file" ]]; then - sudo mkdir -p "$docker_config_dir" - sudo tee "$docker_config_file" >/dev/null <<'EOF' -{ - "log-driver": "json-file", - "log-opts": { - "max-size": "10m", - "max-file": "3" - }, - "features": { - "containerd-snapshotter": true - }, - "iptables": false -} -EOF - sudo systemctl restart docker - fi - - local bashrc_file="$HOME/.bashrc" - if ! grep -q 'WSL Docker helpers' "$bashrc_file" 2>/dev/null; then - cat >> "$bashrc_file" <<'EOF' - -# WSL Docker helpers -export DOCKER_HOST=unix:///var/run/docker.sock -EOF - fi -} - -handle_windows_paths() { - log_info "Ensuring project symlink exists..." - if [[ ! -L "$HOME/ableton-mcp-ai" ]]; then - ln -sfn "$PROJECT_ROOT" "$HOME/ableton-mcp-ai" - fi -} - -install_python_dependencies() { - log_info "Preparing local virtual environment..." - mkdir -p "$RUNTIME_DIR" - - if [[ ! 
-d "$VENV_DIR" ]]; then - python3 -m venv "$VENV_DIR" - fi - - # shellcheck disable=SC1091 - source "$VENV_DIR/bin/activate" - python -m pip install --upgrade pip - - local found_req=false - local requirements_files=( - "$PROJECT_ROOT/MCP_Server/requirements.txt" - "$PROJECT_ROOT/requirements.txt" - ) - - for req_file in "${requirements_files[@]}"; do - if [[ -f "$req_file" ]]; then - log_info "Installing dependencies from: $req_file" - python -m pip install -r "$req_file" - found_req=true - fi - done - - if [[ "$found_req" == "false" ]]; then - log_warn "No requirements.txt files found" - fi - - deactivate -} - -verify_installation() { - log_info "Verifying installation..." - - local all_good=true - - if command -v docker >/dev/null 2>&1; then - log_info "OK Docker: $(docker --version)" - else - log_error "FAIL Docker not found" - all_good=false - fi - - if docker compose version >/dev/null 2>&1; then - log_info "OK Docker Compose: $(docker compose version)" - else - log_error "FAIL Docker Compose not found" - all_good=false - fi - - if command -v python3 >/dev/null 2>&1; then - log_info "OK Python: $(python3 --version)" - else - log_error "FAIL Python3 not found" - all_good=false - fi - - if [[ -x "$VENV_DIR/bin/python" ]]; then - log_info "OK Venv: $VENV_DIR" - else - log_error "FAIL Venv not found at $VENV_DIR" - all_good=false - fi - - if command -v jq >/dev/null 2>&1; then - log_info "OK jq installed" - else - log_error "FAIL jq not found" - all_good=false - fi - - if [[ "$all_good" == "true" ]]; then - log_info "All dependencies installed successfully" - return 0 - fi - - log_error "Some dependencies failed to install" - return 1 -} - -main() { - log_info "Starting AbletonMCP-AI WSL installation..." 
- echo - - check_sudo - detect_ubuntu - check_wsl2 - echo - - install_docker - install_python - install_utilities - configure_docker_wsl2 - handle_windows_paths - install_python_dependencies - echo - - verify_installation - echo - - log_info "Installation complete" - log_info "Next step: run ./setup.sh and then ./start.sh" -} - -main "$@" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install_systemd.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install_systemd.sh deleted file mode 100644 index c6c12d5..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/install_systemd.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -SYSTEMD_DIR="$WSL_DIR/systemd" - -if [[ $EUID -ne 0 ]]; then - echo "Run with sudo" - exit 1 -fi - -for service_file in "$SYSTEMD_DIR"/*.service; do - cp "$service_file" /etc/systemd/system/"$(basename "$service_file")" -done - -systemctl daemon-reload -systemctl enable abletonmcp-stack.service abletonmcp-queue-runner.service -echo "Installed systemd units" -echo "Enabled by default: abletonmcp-stack.service, abletonmcp-queue-runner.service" -echo "Optional unit left disabled: abletonmcp-glm-runner.service" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/logs.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/logs.sh deleted file mode 100644 index 10e3c30..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/logs.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -AUTOMATION_DIR="$(cd "$WSL_DIR/.." 
&& pwd)" -DOCKER_ENV_FILE="$WSL_DIR/.env" -COMPOSE_FILE="$WSL_DIR/docker-compose.yml" -LOGS_DIR="$AUTOMATION_DIR/wsl_runtime/logs" - -follow="${1:-all}" - -compose_cmd() { - docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@" -} - -case "$follow" in - docker) - compose_cmd logs -f - ;; - queue) - tail -f "$LOGS_DIR/queue-runner.log" - ;; - all) - compose_cmd logs -f & - docker_pid=$! - if [[ -f "$LOGS_DIR/queue-runner.log" ]]; then - tail -f "$LOGS_DIR/queue-runner.log" & - tail_pid=$! - wait "$docker_pid" "$tail_pid" - else - wait "$docker_pid" - fi - ;; - *) - echo "Usage: $0 [all|docker|queue]" - exit 1 - ;; -esac diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/restart.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/restart.sh deleted file mode 100644 index 08c9870..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/restart.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -"$SCRIPT_DIR/stop.sh" -sleep 2 -"$SCRIPT_DIR/start.sh" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/setup.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/setup.sh deleted file mode 100644 index 5b14825..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/setup.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -readonly RED='\033[0;31m' -readonly GREEN='\033[0;32m' -readonly YELLOW='\033[1;33m' -readonly BLUE='\033[0;34m' -readonly NC='\033[0m' - -log_info() { echo -e "${GREEN}[INFO]${NC} $*"; } -log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } -log_step() { echo -e "${BLUE}[STEP]${NC} $*"; } - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -AUTOMATION_DIR="$(cd "$WSL_DIR/.." && pwd)" -PROJECT_ROOT="$(cd "$AUTOMATION_DIR/.." 
&& pwd)" -RUNTIME_DIR="$AUTOMATION_DIR/wsl_runtime" -DOCKER_ENV_FILE="$WSL_DIR/.env" -RUNNER_ENV_FILE="$AUTOMATION_DIR/wsl.local.env" -PROJECT_LINK="$HOME/ableton-mcp-ai" - -generate_secret() { - openssl rand -hex "${1:-16}" 2>/dev/null || python3 - <<'PY' -import secrets -print(secrets.token_hex(16)) -PY -} - -ensure_dirs() { - log_step "Creating runtime directories" - mkdir -p \ - "$RUNTIME_DIR/logs" \ - "$RUNTIME_DIR/pids" \ - "$RUNTIME_DIR/data" \ - "$AUTOMATION_DIR/reports" \ - "$AUTOMATION_DIR/runs" \ - "$AUTOMATION_DIR/tasks" \ - "$AUTOMATION_DIR/workflows" \ - "$WSL_DIR/initdb" -} - -ensure_symlink() { - if [[ ! -L "$PROJECT_LINK" ]]; then - ln -sfn "$PROJECT_ROOT" "$PROJECT_LINK" - fi - log_info "Project link: $PROJECT_LINK" -} - -write_docker_env() { - if [[ -f "$DOCKER_ENV_FILE" ]]; then - log_info "Docker env already exists: $DOCKER_ENV_FILE" - return - fi - - log_step "Generating docker env" - cat > "$DOCKER_ENV_FILE" < "$RUNNER_ENV_FILE" </dev/null || { log_error "Docker is not installed"; exit 1; } - docker compose version >/dev/null || { log_error "Docker Compose plugin is not available"; exit 1; } - docker info >/dev/null || { log_error "Docker daemon is not running"; exit 1; } - [[ -f "$DOCKER_ENV_FILE" ]] || { log_error "Missing docker env: $DOCKER_ENV_FILE"; exit 1; } - [[ -f "$COMPOSE_FILE" ]] || { log_error "Missing compose file: $COMPOSE_FILE"; exit 1; } -} - -wait_for_postgres() { - log_info "Waiting for PostgreSQL" - for _ in $(seq 1 60); do - if compose_cmd exec -T postgres pg_isready -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_BOOTSTRAP_DB:-postgres}" >/dev/null 2>&1; then - return 0 - fi - sleep 2 - done - log_error "PostgreSQL did not become ready in time" - exit 1 -} - -wait_for_service_http() { - local service="$1" - local url="$2" - log_info "Waiting for $service" - for _ in $(seq 1 60); do - if curl -fsS "$url" >/dev/null 2>&1; then - return 0 - fi - sleep 2 - done - log_warn "$service is not healthy yet: $url" - return 1 -} - 
-ensure_database() { - local db_name="$1" - if compose_cmd exec -T postgres psql -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_BOOTSTRAP_DB:-postgres}" -tAc "SELECT 1 FROM pg_database WHERE datname='${db_name}'" | grep -q 1; then - return 0 - fi - compose_cmd exec -T postgres psql -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_BOOTSTRAP_DB:-postgres}" -c "CREATE DATABASE \"${db_name}\"" -} - -ensure_gitea_admin() { - local user="${GITEA_ADMIN_USER:-giteaadmin}" - local password="${GITEA_ADMIN_PASSWORD:-changeme}" - local email="${GITEA_ADMIN_EMAIL:-admin@localhost}" - if compose_cmd exec -T gitea sh -c "HOME=/tmp /usr/local/bin/gitea admin user list 2>/dev/null | awk 'NR > 1 && \$2 == \"${user}\" { found=1 } END { exit found ? 0 : 1 }'"; then - return 0 - fi - compose_cmd exec -T gitea sh -c "HOME=/tmp /usr/local/bin/gitea admin user create --admin --username '${user}' --password '${password}' --email '${email}' --must-change-password=false" >/dev/null 2>&1 || log_warn "Could not auto-create Gitea admin user; complete first-run in UI if needed" -} - -start_docker_stack() { - log_step "Starting Docker services" - compose_cmd up -d postgres redis - wait_for_postgres - ensure_database "${GITEA_DB_NAME:-gitea}" - ensure_database "${N8N_DB_NAME:-n8n}" - compose_cmd up -d gitea n8n - wait_for_service_http "Gitea" "http://localhost:${GITEA_HTTP_PORT:-3000}/api/healthz" || true - wait_for_service_http "n8n" "http://localhost:${N8N_PORT:-5678}/healthz" || true - ensure_gitea_admin -} - -start_queue_runner() { - if [[ "$START_QUEUE_RUNNER" != "1" ]]; then - log_info "Queue runner startup skipped by START_QUEUE_RUNNER=$START_QUEUE_RUNNER" - return - fi - - if command -v systemctl >/dev/null 2>&1 && systemctl is-active abletonmcp-queue-runner.service >/dev/null 2>&1; then - log_info "Queue runner already managed by systemd" - return - fi - - local pid_file="$PID_DIR/queue-runner.pid" - if [[ -f "$pid_file" ]] && kill -0 "$(cat "$pid_file")" 2>/dev/null; then - log_info "Queue 
runner already running" - return - fi - - log_step "Starting autonomous queue runner" - nohup bash "$WSL_DIR/run_task_queue.sh" > "$LOGS_DIR/queue-runner.log" 2>&1 & - echo $! > "$pid_file" - log_info "Queue runner PID: $(cat "$pid_file")" -} - -main() { - check_prerequisites - start_docker_stack - start_queue_runner - echo - log_info "Stack started" - echo " Gitea: http://localhost:${GITEA_HTTP_PORT:-3000}" - echo " n8n: http://localhost:${N8N_PORT:-5678}" -} - -main "$@" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/status.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/status.sh deleted file mode 100644 index 691ea89..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/status.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -readonly GREEN='\033[0;32m' -readonly YELLOW='\033[1;33m' -readonly BLUE='\033[0;34m' -readonly RED='\033[0;31m' -readonly NC='\033[0m' - -ok() { echo -e "${GREEN}OK${NC} $*"; } -warn() { echo -e "${YELLOW}WARN${NC} $*"; } -fail() { echo -e "${RED}FAIL${NC} $*"; } -step() { echo -e "${BLUE}$*${NC}"; } - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -AUTOMATION_DIR="$(cd "$WSL_DIR/.." 
&& pwd)" -DOCKER_ENV_FILE="$WSL_DIR/.env" -COMPOSE_FILE="$WSL_DIR/docker-compose.yml" -PID_DIR="$AUTOMATION_DIR/wsl_runtime/pids" -LOGS_DIR="$AUTOMATION_DIR/wsl_runtime/logs" - -compose_cmd() { - docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@" -} - -step "Docker" -if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then - ok "docker daemon running" -else - fail "docker daemon unavailable" -fi -echo - -step "Compose services" -if command -v docker >/dev/null 2>&1 && [[ -f "$COMPOSE_FILE" ]]; then - compose_cmd ps || true -else - warn "compose file or docker missing" -fi -echo - -step "Queue runner" -if [[ -f "$PID_DIR/queue-runner.pid" ]] && kill -0 "$(cat "$PID_DIR/queue-runner.pid")" 2>/dev/null; then - ok "queue runner PID $(cat "$PID_DIR/queue-runner.pid")" -elif command -v systemctl >/dev/null 2>&1 && systemctl is-active abletonmcp-queue-runner.service >/dev/null 2>&1; then - ok "queue runner managed by systemd" -else - warn "queue runner not running" -fi -echo - -step "Logs" -if [[ -d "$LOGS_DIR" ]]; then - ls -1 "$LOGS_DIR" | sed 's/^/ - /' -else - warn "no logs directory" -fi diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/stop.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/stop.sh deleted file mode 100644 index c85919a..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/scripts/stop.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -readonly GREEN='\033[0;32m' -readonly YELLOW='\033[1;33m' -readonly BLUE='\033[0;34m' -readonly NC='\033[0m' - -log_info() { echo -e "${GREEN}[INFO]${NC} $*"; } -log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } -log_step() { echo -e "${BLUE}[STEP]${NC} $*"; } - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -WSL_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -AUTOMATION_DIR="$(cd "$WSL_DIR/.." 
&& pwd)" -DOCKER_ENV_FILE="$WSL_DIR/.env" -COMPOSE_FILE="$WSL_DIR/docker-compose.yml" -PID_DIR="$AUTOMATION_DIR/wsl_runtime/pids" - -compose_cmd() { - docker compose --env-file "$DOCKER_ENV_FILE" -f "$COMPOSE_FILE" "$@" -} - -stop_runner() { - local pid_file="$1" - if [[ ! -f "$pid_file" ]]; then - return - fi - local pid - pid="$(cat "$pid_file")" - if kill -0 "$pid" 2>/dev/null; then - kill -TERM "$pid" 2>/dev/null || true - sleep 2 - kill -KILL "$pid" 2>/dev/null || true - fi - rm -f "$pid_file" -} - -main() { - log_step "Stopping queue runner" - stop_runner "$PID_DIR/queue-runner.pid" - echo - log_step "Stopping Docker services" - if command -v docker >/dev/null 2>&1; then - compose_cmd down "$@" || true - else - log_warn "Docker not installed" - fi - log_info "Stack stopped" -} - -main "$@" diff --git a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/send_telegram.sh b/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/send_telegram.sh deleted file mode 100644 index 7f55670..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/automation/wsl/send_telegram.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" -LOCAL_ENV_FILE="${LOCAL_ENV_FILE:-$PROJECT_ROOT/automation/wsl.local.env}" - -if [[ -f "$LOCAL_ENV_FILE" ]]; then - # shellcheck disable=SC1090 - source "$LOCAL_ENV_FILE" -fi - -MESSAGE="${1:-}" -if [[ -z "$MESSAGE" ]]; then - exit 0 -fi - -BOT_TOKEN="${TELEGRAM_BOT_TOKEN:-}" -CHAT_ID="${TELEGRAM_CHAT_ID:-}" - -if [[ -z "$BOT_TOKEN" || -z "$CHAT_ID" ]]; then - exit 0 -fi - -curl -fsS -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \ - --data-urlencode "chat_id=${CHAT_ID}" \ - --data-urlencode "text=${MESSAGE}" \ - --data "disable_web_page_preview=true" >/dev/null diff --git a/AbletonMCP_AI_BAK_20260328_200801/load_samples.py b/AbletonMCP_AI_BAK_20260328_200801/load_samples.py deleted file mode 100644 index 6e58efc..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/load_samples.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python -""" -Script para cargar samples en Ableton MCP AI -Este script guía al usuario para cargar samples manualmente o usa el browser -""" -import os - -# Configuración de samples -SAMPLES_CONFIG = { - "kick": r"C:\Users\ren\embeddings\all_tracks\BBH - Primer Impacto - Kick 1.wav", - "clap": r"C:\Users\ren\embeddings\all_tracks\MT Clap & Snare Hit 05.wav", - "hat": r"C:\Users\ren\embeddings\all_tracks\BBH - Primer Impacto - Closed Hat 3.wav", - "bass": r"C:\Users\ren\embeddings\all_tracks\MT_Bass Loop 04 F 125.wav", -} - -def generate_instrument_setup_guide(): - """Genera instrucciones detalladas para cargar samples""" - - guide = """ -╔══════════════════════════════════════════════════════════════════╗ -║ CONFIGURACIÓN DE INSTRUMENTOS - HOUSE 90s ║ -╚══════════════════════════════════════════════════════════════════╝ - -Para que suene tu track, necesitas cargar instrumentos en cada track MIDI. - -🥁 TRACK 0 - KICK (Rojo): - 1. Arrastra "Drum Rack" del browser al track - 2. Arrastra tu sample de kick al pad C1 (nota 36) - 3. Ajusta volumen a -3dB - -👏 TRACK 1 - CLAP (Naranja): - 1. Mismo Drum Rack o uno nuevo - 2. 
Arrastra sample de clap/snare al pad D2 (nota 50) - 3. Volumen a -6dB - -🎩 TRACK 2 - HIHAT (Amarillo): - 1. Drum Rack - 2. Sample de closed hat al pad F#1 (nota 42) - 3. Volumen a -12dB - -🎸 TRACK 3 - BASS (Azul): - Opción A (Sampler): - 1. Arrastra "Simpler" al track - 2. Arrastra loop de bass (MT_Bass Loop 04 F 125.wav) - 3. Ajusta para que C3 dispare el sample - - Opción B (Synth): - 1. Carga "Operator" - 2. Preset "Sub Bass" o "Funky Bass" - 3. Ajusta envolvente: Attack 5ms, Decay 200ms, Sustain 80% - -🎹 TRACK 4 - CHORDS (Purpura): - 1. Carga "Wavetable" o "Analog" - 2. Preset "House Chords", "Chord Stab" o "Vintage Keys" - 3. Añade reverb (Return A) al 20% - -═══════════════════════════════════════════════════════════════════ - -📁 SAMPLES RECOMENDADOS DE TU LIBRERÍA: - -Kick: BBH - Primer Impacto - Kick 1.wav -Clap: MT Clap & Snare Hit 05.wav -Hat: BBH - Primer Impacto - Closed Hat 3.wav -Bass: MT_Bass Loop 04 F 125.wav - -═══════════════════════════════════════════════════════════════════ - -⚡ ATAJO RÁPIDO: -Si tienes Drum Rack presets guardados: -1. Busca en el browser: "Drums > Drum Rack" -2. Arrastra a cada track de drums -3. 
Los clips MIDI ya están programados y sonarán automáticamente - -═══════════════════════════════════════════════════════════════════ -""" - return guide - - -def verify_samples(): - """Verifica qué samples existen""" - samples_dir = r"C:\Users\ren\embeddings\all_tracks" - - print("\n📂 Verificando samples en librería...") - print(f"Directorio: {samples_dir}") - print("-" * 50) - - if not os.path.exists(samples_dir): - print("❌ Directorio no encontrado!") - return False - - # Buscar archivos comunes - found = [] - for f in os.listdir(samples_dir)[:20]: # Primeros 20 - if f.endswith('.wav'): - found.append(f) - - print(f"✓ {len(found)} archivos WAV encontrados") - print("\nEjemplos:") - for f in found[:10]: - print(f" - {f}") - - return True - - -if __name__ == "__main__": - print(generate_instrument_setup_guide()) - verify_samples() diff --git a/AbletonMCP_AI_BAK_20260328_200801/roadmap.md b/AbletonMCP_AI_BAK_20260328_200801/roadmap.md deleted file mode 100644 index 0d06b52..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/roadmap.md +++ /dev/null @@ -1,215 +0,0 @@ -# AbletonMCP-AI — Tech House Professional DJ Roadmap - -> Repositorio: AbletonMCP_AI | Foco: **Tech House** (122–128 BPM) -> Última actualización: 2026-03-28 - ---- - -## Estado actual del sistema - -El sistema tiene una base sólida: -- `song_generator.py` (~6k líneas): blueprints de secciones, perfiles de arrangement, bus system, gain calibration, device automation por sección -- `vector_manager.py`: semantic search + filtros de duración (evita canciones completas) -- `role_matcher.py`: validación de samples por rol con thresholds y penalizaciones -- `sample_selector.py`: selección de samples con compatibilidad BPM/key -- `audio_resampler.py`: resample layers y análisis -- `server.py` (~7k líneas): herramientas MCP expuestas al AI - ---- - -## 🔴 BUG FIXES — Prioridad crítica - -### BF-01: Track "AUDIO ATMOS 2" con canción completa -- **Problema**: El filtro de duración (max 45s) no se aplica 
consistentemente en todos los paths de carga -- **Causa**: `_build_audio_fallback_sample_paths` en `server.py` tiene glob patterns que ignoran el filtro de `vector_manager` -- **Fix**: Reindexar la librería con `reindex_library` y verificar que `_find_library_file` rechaza archivos donde `soundfile` lanza excepción (duración = -1) -- **Archivo**: `server.py` → `_find_library_file`, `_build_audio_fallback_sample_paths` - -### BF-02: Embeddings desactualizados post-cambios -- **Problema**: Al agregar nuevos samples a la librería, el índice `.sample_embeddings.json` no se reconstruye automáticamente -- **Fix**: Agregar un hash de fingerprint del directorio (mtime o conteo) al índice y validar en cada arranque -- **Archivo**: `vector_manager.py` → `_load_or_build_index` - -### BF-03: Colisión de nombres de track "AUDIO X 2" -- **Problema**: Cuando se crea un segundo track con el mismo nombre, Ableton le agrega "2" automáticamente y luego el sistema no lo encuentra por nombre -- **Fix**: Normalizar búsquedas de tracks usando índice numérico en vez de nombre como identificador primario -- **Archivo**: `server.py` → `_mute_tracks_for_audio_layers`, `_normalize_track_name` - -### BF-04: Linting errors restantes -- **Problema**: Múltiples errores de ruff reportados en `ruff_errors.txt` (principalmente F821 undefined names, E501 line length) -- **Fix**: Pasar `ruff check --fix` y revisar los F821 manualmente -- **Archivos**: `server.py`, `song_generator.py` - -### BF-05: `soundfile` excepción silenciosa permite archivos inválidos -- **Problema**: Si `soundfile` falla al leer un archivo, se asigna `duration = -1` pero el archivo igual puede ser insertado en escenarios de fallback -- **Fix**: En `_find_library_file`, `duration < 0` debe ser rechazado explícitamente también en el branch de fallback de `glob` -- **Archivo**: `server.py` → `_find_library_file` - ---- - -## 🟠 MEJORAS CORE — Tech House específico - -### MJ-01: Blueprints de sección optimizados para Tech House DJ -- 
**Qué**: Los blueprints actuales (`standard`, `extended`, `club`) son genéricos. Tech House DJ requiere intros/outros de 16-32 bars para beatmatching -- **Cambio**: - ```python - 'tech-house-dj': [ - ('INTRO DJ', 32, 8, 'intro', 1), # 32 bars solo kick+bass para mezcla - ('GROOVE A', 16, 16, 'build', 2), - ('VOX TEASE', 8, 20, 'build', 3), - ('DROP A', 32, 30, 'drop', 5), - ('BREAK', 8, 22, 'break', 1), - ('BUILD', 8, 24, 'build', 3), - ('DROP B', 32, 32, 'drop', 5), - ('OUTRO DJ', 32, 8, 'outro', 1), # 32 bars solo kick+bass para salida - ] - ``` -- **Archivo**: `song_generator.py` → `SECTION_BLUEPRINTS` - -### MJ-02: Patrones rítmicos tech house propios -- **Qué**: Los patrones de kick/hat/perc están en `create_drum_pattern` (server.py) como presets genéricos. Tech House usa swing, offbeat hats, y kicks con ghost notes -- **Cambio**: Agregar presets `'tech-house-swing'`, `'tech-house-jackin'`, `'tech-house-minimal'` con: - - Kick en 1 y 3 con variaciones en 2.5 y 3.5 - - Hi-hat con swing 16% y offbeats en 1/8 - - Clap/snare en 2 y 4 con ghost notes -- **Archivo**: `server.py` → `create_drum_pattern` - -### MJ-03: Bass lines tech house -- **Qué**: `create_bassline` genera 4 estilos genéricos. 
Tech House requiere basslines sincopadas y groovy -- **Cambio**: Agregar estilo `'tech-house'` con notas en posiciones off-beat, slides, y variaciones de velocidad para groove -- **Archivo**: `server.py` → `create_bassline` - -### MJ-04: Chord progressions tech house -- **Qué**: `CHORD_PROGRESSIONS` en `song_generator.py` no tiene entradas específicas para tech house -- **Cambio**: Agregar progressiones: - - Am → Fm → Gm (oscura, hipnótica) - - Dm → Am → Dm (loop de dos acordes para drop) - - Cm → Gm (minimalista con tensión) -- **Archivo**: `song_generator.py` → `CHORD_PROGRESSIONS` - -### MJ-05: Estilo Latin Tech House -- **Qué**: El sistema tiene menciones de `latin-industrial` (Eli Brown) pero no tiene patrones de percusión latina implementados -- **Cambio**: Agregar preset `'latin-tech-house'` con: - - Conga / bongo patterns como perc layer - - Bass con notas sincopadas al estilo afro-percusivo - - Vocal shots ("ey", "come on") en offbeats -- **Archivo**: `song_generator.py`, `server.py` - -### MJ-06: Genre keyword expansion en VectorManager -- **Qué**: Las búsquedas semánticas usan strings genéricos. Tech house tiene vocabulario específico -- **Cambio**: Agregar diccionario de términos preferidos por género que enriquecen el query: - ```python - GENRE_SEARCH_TERMS = { - 'tech-house': ['groovy', 'driving', 'punchy', 'jackin', 'swinging', 'hypnotic'], - 'house': ['deep', 'soulful', 'warm', 'classic'], - ... - } - ``` -- **Archivo**: `vector_manager.py` o `server.py` - -### MJ-07: Reindex automático al detectar cambios en librería -- **Qué**: El índice de embeddings solo se reconstruye manualmente. Si el usuario agrega samples, no se detectan -- **Cambio**: Al iniciar `VectorManager`, comparar el conteo de archivos actual vs el del índice. 
Si difieren, rebuild automático -- **Archivo**: `vector_manager.py` → `_load_or_build_index` - ---- - -## 🟡 MEJORAS DJ PRO — Funcionalidades de DJ profesional - -### DJ-01: Track Stems export / bus routing visible -- **Qué**: Un DJ profesional necesita poder exportar stems (kick, bass, music, fx) separados -- **Cambio**: Agregar herramienta `export_stems_config()` que configura los buses para exportación de stems individual, nombrando y coloreando cada bus consistentemente -- **Archivo**: `server.py` (nuevo tool) - -### DJ-02: Harmonic mixing — Camelot wheel -- **Qué**: El sistema elige keys pero no verifica compatibilidad con Camelot wheel para mezcla armónica -- **Cambio**: Agregar función `get_compatible_keys(current_key)` que devuelve keys compatibles en la rueda de Camelot (±1 tono, relativo mayor/menor). Usar en `suggest_key_change` -- **Archivo**: `server.py` → `suggest_key_change` - -### DJ-03: BPM grid automático — Sync markers -- **Qué**: Al generar una canción con intro DJ de 32 bars, colocar marcadores de Ableton (`locators`) en los puntos exactos de cada sección para que el DJ pueda saltar entre puntos -- **Cambio**: Usar el comando `create_arrangement_locator` de Ableton API para marcar cada sección -- **Archivo**: `server.py`, `Remote_Script.py` (agregar comando de socket) - -### DJ-04: Loop regions automáticas -- **Qué**: Marcar los drops como loop regions en Ableton para que el DJ pueda activar el loop con un botón -- **Cambio**: Al generar la canción, colocar punch-in / punch-out en los drops principales -- **Archivo**: `server.py` - -### DJ-05: Energy curve explícita -- **Qué**: El sistema tiene `ROLE_ACTIVITY` con valores de energía por sección pero no hay una curva visible para el usuario -- **Cambio**: Al terminar la generación, imprimir (en el manifest) la curva de energía sección a sección: `[INTRO: 25%] → [BUILD: 70%] → [DROP: 100%]...` -- **Archivo**: `server.py` → manifest / `get_generation_manifest` - -### DJ-06: Referencia de track real 
— Eli Brown style -- **Qué**: `REFERENCE_TRACK_PROFILES` tiene "Eli Brown - Me Gusta" definido pero no se puede cargar automáticamente una referencia para análisis A/B -- **Cambio**: Hacer funcional el sistema de referencia: si el usuario pone un archivo en `librerias/reference/`, que sea analizable e influya en BPM, key, y energy curve de la generación -- **Archivo**: `server.py`, `audio_resampler.py`, `reference_listener.py` - ---- - -## 🟢 NICE TO HAVE — Calidad de vida - -### NTH-01: Preview de canción antes de generar -- **Qué**: El sistema genera todo de golpe sin preview. Poder ver primero el "blueprint" (qué tracks, qué samples, qué estructura) antes de ejecutar -- **Cambio**: Agregar `preview_generation(genre, style, key, bpm)` que devuelve el manifest sin crear nada en Ableton -- **Archivo**: `server.py`, `song_generator.py` - -### NTH-02: Regeneración selectiva de secciones -- **Qué**: Si el drop no quedó bien, hay que regenerar todo. Debería poder regenerarse solo el drop -- **Cambio**: Agregar `regenerate_section(section_name)` que borra los clips de esa sección y los regenera -- **Archivo**: `server.py` - -### NTH-03: Historial de generaciones -- **Qué**: Solo se guarda el último manifest. Debería haber un historial de las últimas 5 generaciones -- **Cambio**: Guardar manifests en archivos `.json` con timestamp en `librerias/generations/` -- **Archivo**: `server.py` → `_store_generation_manifest` - -### NTH-04: Color coding consistente por género -- **Qué**: Los colores de tracks son estáticos. Tech House podría tener paleta propia (naranja, azul oscuro) -- **Cambio**: Agregar `GENRE_COLOR_PALETTES` y aplicar al generar tracks -- **Archivo**: `song_generator.py` → `TRACK_COLORS` - -### NTH-05: Sample diversity mejorada -- **Qué**: Si la librería tiene 3 kicks, el sistema puede usar el mismo kick en 2 generaciones seguidas -- **Cambio**: Existe `reset_diversity_memory` pero no hay persistencia entre sesiones. 
Guardar el historial de samples usados en un JSON local -- **Archivo**: `sample_selector.py` - -### NTH-06: Validación de routing en tiempo real -- **Qué**: `validate_set` existe pero no se llama automáticamente al generar -- **Cambio**: Al terminar `generate_track`, llamar automáticamente a `detect_common_issues` y mostrar resumen con numero de errores/warnings -- **Archivo**: `server.py` → `generate_track` - -### NTH-07: howto.md actualizado para Tech House -- **Qué**: El `howto.md` documenta el sistema genérico. Agregar sección específica de "Cómo generar Tech House profesional" con ejemplos de prompts, flujos de trabajo DJ, y settings recomendados -- **Archivo**: `howto.md` - ---- - -## Orden de ejecución recomendado - -| Prioridad | ID | Nombre | Esfuerzo | -|---|---|---|---| -| 1 | BF-01 | Full song en ATMOS track | 1h | -| 2 | BF-02 | Embeddings auto-rebuild | 2h | -| 3 | BF-03 | Colisión de nombres | 2h | -| 4 | MJ-01 | Blueprints DJ 32-bar intro/outro | 1h | -| 5 | MJ-02 | Drum patterns tech house | 2h | -| 6 | DJ-02 | Camelot wheel | 2h | -| 7 | MJ-03 | Bassline tech house | 1h | -| 8 | MJ-05 | Latin tech house preset | 3h | -| 9 | DJ-03 | BPM locators automáticos | 4h | -| 10 | DJ-06 | Referencia de track real | 4h | -| 11 | NTH-01 | Preview pre-generación | 3h | -| 12 | NTH-06 | Auto-validación post-generación | 1h | -| 13 | BF-04 | Linting cleanup | 2h | - ---- - -## Notas arquitectónicas para el salto a Tech House - -1. **BPM default**: cambiar `default_bpm` de `tech-house` de 125 a **126** (sweet spot del género actual) -2. **Key pool**: priorizar `Am`, `Fm`, `Dm` → más oscuras y groovy que las opciones actuales -3. **Swing**: el swing del 8% actual en hats es insuficiente. Tech House moderno usa 12-16% -4. **Sidechain pump**: el threshold actual de -22dB en bass bus es correcto, pero el release de 0.12s es lento. Bajar a 0.08-0.10s para más pump -5. 
**Atmos tracks**: el vol de 0.50 en `ROLE_MIX['atmos']` es correcto, pero usar filtros HPF altos (>1kHz) para que no compitan con el sub diff --git a/AbletonMCP_AI_BAK_20260328_200801/setup_returns_master.py b/AbletonMCP_AI_BAK_20260328_200801/setup_returns_master.py deleted file mode 100644 index 6218d10..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/setup_returns_master.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -Setup Returns and Master Chain for Ableton Live 12 -Creates return tracks and configures master chain -""" -import socket -import json -import time -import os -from datetime import datetime -from typing import Dict, Any - -LOG_FILE = r"C:\Users\ren\Documents\Ableton\Logs\returns_master.txt" - -def log_message(message): - """Log message to file and console""" - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - log_line = f"[{timestamp}] {message}" - print(log_line) - os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True) - with open(LOG_FILE, "a", encoding="utf-8") as f: - f.write(log_line + "\n") - -class AbletonClient: - def __init__(self, host="127.0.0.1", port=9877, timeout=15.0): - self.host = host - self.port = port - self.timeout = timeout - - def send(self, command_type: str, params: Dict[str, Any] = None) -> Dict[str, Any]: - """Send command to Ableton runtime and get response""" - payload = json.dumps({ - "type": command_type, - "params": params or {}, - }).encode("utf-8") + b"\n" - - with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock: - sock.sendall(payload) - reader = sock.makefile("r", encoding="utf-8") - try: - line = reader.readline() - finally: - reader.close() - try: - sock.shutdown(socket.SHUT_RDWR) - except OSError: - pass - - if not line: - return {"status": "error", "message": f"No response for command: {command_type}"} - - return json.loads(line) - -def main(): - log_message("=" * 60) - log_message("STARTING RETURNS AND MASTER CHAIN SETUP") - log_message("=" * 60) - - client = AbletonClient() - 
- try: - # Get current session info - log_message("\n--- Getting session info ---") - session_info = client.send("get_session_info") - log_message(f"Session: tracks={session_info.get('result', {}).get('num_tracks', 'N/A')}, returns={session_info.get('result', {}).get('num_return_tracks', 'N/A')}") - - # ======================================== - # CREATE RETURN TRACKS - # ======================================== - log_message("\n" + "=" * 60) - log_message("CREATING RETURN TRACKS") - log_message("=" * 60) - - # 1. A-REVERB (Large Hall) - log_message("\n--- Creating A-REVERB return track ---") - reverb_response = client.send("setup_return_track", { - "preset": "reverb_large", - "name": "A-REVERB" - }) - log_message(f"A-REVERB: {reverb_response.get('status')} - {json.dumps(reverb_response.get('result', reverb_response.get('message')), indent=2)}") - - if reverb_response.get("status") == "success": - return_index = reverb_response.get("result", {}).get("index", 0) - log_message(f"Setting A-REVERB volume to 0.70...") - vol_response = client.send("set_track_volume", { - "track_index": return_index, - "volume": 0.70, - "track_type": "return" - }) - log_message(f"Volume set: {vol_response.get('status')}") - - # 2. B-DELAY (Ping Pong) - log_message("\n--- Creating B-DELAY return track ---") - delay_response = client.send("setup_return_track", { - "preset": "delay_pingpong", - "name": "B-DELAY" - }) - log_message(f"B-DELAY: {delay_response.get('status')} - {json.dumps(delay_response.get('result', delay_response.get('message')), indent=2)}") - - if delay_response.get("status") == "success": - return_index = delay_response.get("result", {}).get("index", 1) - log_message(f"Setting B-DELAY volume to 0.65...") - vol_response = client.send("set_track_volume", { - "track_index": return_index, - "volume": 0.65, - "track_type": "return" - }) - log_message(f"Volume set: {vol_response.get('status')}") - - # 3. 
C-COMPRESSOR (Parallel compression for sidechain pumping) - log_message("\n--- Creating C-COMPRESSOR return track ---") - comp_response = client.send("setup_return_track", { - "preset": "parallel_comp", - "name": "C-COMPRESSOR" - }) - log_message(f"C-COMPRESSOR: {comp_response.get('status')} - {json.dumps(comp_response.get('result', comp_response.get('message')), indent=2)}") - - if comp_response.get("status") == "success": - return_index = comp_response.get("result", {}).get("index", 2) - log_message(f"Setting C-COMPRESSOR volume to 0.80...") - vol_response = client.send("set_track_volume", { - "track_index": return_index, - "volume": 0.80, - "track_type": "return" - }) - log_message(f"Volume set: {vol_response.get('status')}") - - # ======================================== - # MASTER CHAIN SETUP - # ======================================== - log_message("\n" + "=" * 60) - log_message("SETTING UP MASTER CHAIN") - log_message("=" * 60) - - # Get current master devices - log_message("\n--- Getting current master devices ---") - master_devices = client.send("get_devices", { - "track_type": "master", - "track_index": 0 - }) - devices_list = master_devices.get("result", []) - if isinstance(devices_list, list): - log_message(f"Current master devices: {[d.get('name', '?') if isinstance(d, dict) else str(d) for d in devices_list]}") - else: - log_message(f"Master devices response: {master_devices}") - - # Setup master chain - log_message("\n--- Loading master chain devices ---") - client.timeout = 30.0 - master_chain_response = client.send("setup_master_chain", { - "devices": ["Utility", "EQ Eight", "Compressor", "Limiter"], - "parameters": { - "Utility": { - "Gain": 0.0 - }, - "EQ Eight": { - "Mode": "Stereo" - }, - "Compressor": { - "Threshold": -18.0, - "Ratio": 2.0, - "Attack": 10.0, - "Release": 80.0, - "Makeup": 2.0 - }, - "Limiter": { - "Ceiling": -0.3, - "Release": 50.0 - } - } - }) - log_message(f"Master chain: {master_chain_response.get('status')} - 
{json.dumps(master_chain_response.get('result', master_chain_response.get('message')), indent=2)}") - - # Set master volume to 0.85 - log_message("\n--- Setting master volume to 0.85 ---") - master_vol_response = client.send("set_track_volume", { - "track_index": 0, - "volume": 0.85, - "track_type": "master" - }) - log_message(f"Master volume: {master_vol_response.get('status')}") - - # ======================================== - # VERIFICATION - # ======================================== - log_message("\n" + "=" * 60) - log_message("VERIFICATION") - log_message("=" * 60) - - # Get final session info - log_message("\n--- Final session info ---") - final_session = client.send("get_session_info") - result = final_session.get("result", {}) - log_message(f"Tracks: {result.get('num_tracks')}, Returns: {result.get('num_return_tracks')}, Scenes: {result.get('num_scenes')}") - - # Get final master devices - log_message("\n--- Final master devices ---") - final_master = client.send("get_devices", { - "track_type": "master", - "track_index": 0 - }) - devices_list = final_master.get("result", []) - if isinstance(devices_list, list): - for d in devices_list: - if isinstance(d, dict): - log_message(f" - {d.get('name', '?')}") - - # Verify return tracks - log_message("\n--- Return tracks ---") - for i in range(3): - ret_info = client.send("get_track_info", { - "track_index": i, - "track_type": "return" - }) - result = ret_info.get("result", {}) - log_message(f" Return {i}: {result.get('name', '?')} - Volume: {result.get('volume', '?'):.2f}" if isinstance(result.get('volume'), (int, float)) else f" Return {i}: {result.get('name', '?')}") - - log_message("\n" + "=" * 60) - log_message("SETUP COMPLETE") - log_message("=" * 60) - - except Exception as e: - log_message(f"Error: {e}") - import traceback - log_message(traceback.format_exc()) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/AbletonMCP_AI_BAK_20260328_200801/start_server.bat 
b/AbletonMCP_AI_BAK_20260328_200801/start_server.bat deleted file mode 100644 index 8d8be55..0000000 --- a/AbletonMCP_AI_BAK_20260328_200801/start_server.bat +++ /dev/null @@ -1,27 +0,0 @@ -@echo off -echo ============================================ -echo AbletonMCP-AI Server -echo ============================================ -echo. -echo Iniciando servidor MCP... -echo Conectando a Ableton en localhost:9877 -echo. -echo Asegurate de que: -echo 1. Ableton Live 12 esta abierto -echo 2. El Control Surface 'AbletonMCP_AI' esta seleccionado -echo en Preferencias ^> Link/Tempo/MIDI -echo. -echo Presiona Ctrl+C para detener -echo ============================================ -echo. - -cd /d "%~dp0\MCP_Server" - -python server.py -if errorlevel 1 ( - echo. - echo ERROR: No se pudo iniciar el servidor - echo Verifica que Python esta instalado y en el PATH - echo. - pause -) diff --git a/Axiom_25_Classic/Preset.syx b/Axiom_25_Classic/Preset.syx deleted file mode 100644 index 5a3602111e2508bb61248b99732ce794a9366b06..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1326 zcma))IZwkt5QQgRC-ITP-W{$E(Lk9WfKU)Zq~J=z4ITH95-OzpWjb2CyKf|ixX6rT z>&+X_j{NNptw;GZ&|E`MDNcWXoUQa$9Q;d)i=>*(NZF*+W{OWj&C`l_gI(AB^!)Pr z_Wm&`okub3hR%y}g2-HK!fWactfJ!CEUg%zInsH*-{0Qp@9ym%40vgIWp(WvUaa%E z4IkkpZh`V@L}PfVZWTt&nZ@%Q<(5_ys2ixZR=r;D7^ zY{^M@a-Z;;yZ|}50uj%X`@o=>=gHl}OJ)}EiwxnVJVWjT+NEI_uk{SOQ&p2Q1*-|o^hN=wO##;9J;ofPv+DOBu$Afq%nG9qoxfC*zamu{2 zzZ-U0z+^-2QI0R)>F~jn()unZXKY)r1BrsaC2%c-r)t z5xL+IkGZrnq&(w!8VVFh)Og+6>e~9`&D+f_^Wx>g;wu(}Ttu;4J%%b(<2f__fGYDX z*RwR9wxP;Z&*lTSX0HcG_mp6ER|@+|EL;1C)qxTx*1jVgTCufn2n!!lw<8~Z#i~+? 
z+ss}*BcF>284TppCTyh=ytbbPko& zH9#}KR8S%J6rdah672xm+1=Ye_;PsE0_Z(dsh$VW1yq@DnU1@4--0UFvjB-5q;@`n{u7`K1rjq4pm$KEW*$I9bJlFRYSuq&qE>Di v0DVHbXB-tDjT2Bz{AKUF-FbGW&!Nv$>5b2b z?^_C0dV2gl6yz5dgQ1~JE1D0k=l=Zezu+=+t*9M!$6cXQg1YZiMZKm#f^5b zUCS)Xfvj5|R*H6<@E3tQ3WZq!b{x;%`gTXA=;9TepwSAnlP%j7v}1%V01Ld^}c6=T)q<2$J=qE+lhp} zHtePSGGA|WqovYy;cO4s`z4dHde)04FTZ|6+LLFVjo7mZc{YouAf7F0Pl0*1V$U|@ z*)AUa1nfwAip;YUdv+nuZt;}l_p>MMDKXDp?AeDr`^8fh&w;e3%sdCNCqSNI@l?cf zDD9~*&tdGzAkPu;RK;^N?Wr=)2=*L9p5x-FiRVPxQ)8Z!*mDYbPKzg(6;YTL<+99l z2Ai_Tlv9&VLf)9@CKRx#h)g9l=_r(qiLOEgo2tlEQvq!aPjnCM14!KSCk^h`{7-H7LDQ=V?b3v7CcOs~|W8}Zth z=tjK3rnkuSPEEQI?~RFW#NXKT0h#J*(v285Cb|(5*ffbuk(zWPri_Vh#56X2M5Y-v z=|=oxOmrhYVN(N{X4Rw{(KIHy5ivH+A=76y=|;4SiEcz2n>xtURg-SSyfM*@_<~Je zk!e9qx)I-uiEhMqZ2EyrKgCqgjrf%|73fARLX#T;Cg;>78{xb$v5j!OYY5yHZW%Hy zSCednTVYIWBiu@CT7^ujGvnP(CvNJoDDExOTWrI#UJ>7`(7IXMoa_bVLMqh8D z7a4K`j=nKL%8t2B23car%^0~QLCRpctp-_U$ZZ(8JweJcIdec2C=y>#ZdZbo339tl z$|_5_2dCVdAZ2UZK7*_=#Imxgs(4mUYJ^= z3+bFCTBD0N=Otlkk1iV~?a>v?yqYle-{fx8Flmp*F!P!)wMXV2E72a|d(7PwruOKT z$w_;38|S-eTrEVQP=e%~qy8!Z(}ykTCV%t*&lz(jJZD zoD;&-9!(l1?NNl8Q^M39O&ccd(MQaj5vKOYTybUEBYefVhA_29vnD6)Q4{Bkg{eK7 zGfdi}&zRW~ruL|9n6yV7%)cmiYL6DuIV-eB-*C?F!qgu9FihH` zpP2b8VHV~7bBl(_dgO;7(>q~GkG#3`Dy&ByUwVF-Fr`Pn_paB{H>N7AM}7s)xl)+Y zBfrWpS&#f`%v>W(>5*S+n5;*B9cHc7%uTZJh- z^4koP^~i6>%pJm%9{HVVW|j5G@50R8!jvBQJ%&kh?#0Y~39}?U^83@w8qIkCGY<+= zdlVQZ>$@Mu%tOM|9vwDJ+V>1*9!Z#G&3V)?Y2QaM^H^r8o_ZVFA5Rmj>{*dNfr%%3 zPjEVMYsp*F{!|#xN3GUO@9pW?-j9+u(){VL_s+Te)RMQ~eb|qse|Tq1EcRf@XK}3D zV5~feRiLqo{aCtVB@>H1srF?Yt1=j?N@CS$th4=CI)&#UnqD!n*fVf{6~`JKj5S7LU8AwC_hacG-!QS*19N{9$GSBb>o$pXhsL_wkENS_ z&%|O+*ZqAQ>%m~Gha}b`8tZXCmj1Qy#KdBc;{8(`>)Bwe=Ooq(8tY|0mj2E1Djlmv czvN%zSZ|V8`oDnuTaxP?&Go+bP<-;wzo*!3IsgCw diff --git a/BCR2000/Preset.syx b/BCR2000/Preset.syx deleted file mode 100644 index df3e9228d425d379595d269b3c3a3e71d697906e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10673 
zcmZ{qXUG-9ZW8@FEZ>>ScU$#^?wfWmX-`kf<-vTXvAEReX0NWvH)cBRrG;*i%6AUs zr{@|g-K5=;OT+nQdvY$r8_CbMW^ul?qm9;Nd#ce%@Fv#f8};tWLcNnrOeDqrS0O2m zjVJZj-F9=S*AV*peE+oXmg~)>Ml9WspYH?vFPUn?vr#-_^7mY!?rH06Vgt)bi})o?XndTRdfXKR;zXWy|w3_v~Swz2d2e=a;Of zVtMv)Pr^L=#WOCR16j|wzVnIattf*M9OsBZ1z)VG$PzhtoWOhP{n^I;f!-P_(D3jR=*$}t6=?*j9g$WgLPnpb)xX(=wnCYRI#?TReWldvd zNBqrAkC^E(Oz4Ow%4BxLQ*L_3OwVCLN4!ubvm;(|(<^4G!-S4_txRS|Omfo{Gc{mB zM@%b|*%33`^oE&cVM0f|RVK3|=D4ZJO!F|IBU;L2c0`+-7MN)fCUitcnaqypa#N3) zmS93hEGv`Q5%0L^Ju|JqgpT;2OlC)XW02 z;M8QQTAkMVL(~szJ@BnpJm>&n6dUQ!K)2 zO3@5zHPy^!;H$}n)f9^`no=}_8cj8`8Te>&VKl`eY^D^=pf*#@YzDrWT-Z#p2$LyA zGpNZ_Gn;`=CKo1CEW%<+(F|%a)y!t#i^+w>6pJvJQZ$1aOttULP$~7xRXYRI17A!oET&k5!Bj>!sKHb-yMYfT7Y0)-!d@z)8`NH^nccwmk_&q& z7GW-x(G6-Y6$@rL@V(^1UW!eaOJ$UUnoGrk*$#X!xv-aF6XsGG?V#pTv0&B%-%Bp+ zrPzeIR7O3hxl}Be{lNE<3wtRxVJ?-?4{9zI3uZy^z2w4PicPblua|ngb_>tKof=D} z()UNnQ1ogsNM0g7fdd0K?&WhWvKU+#yT}k>9D9R*3vA&fG0bNs(8BE45PO`QY+(n=$%@pL3ru zk)uR0%^dCL%maamN3Q;$VwyP`<;+9EM2^%7E1NmuE6g7eCUSICozu+GF@DbD!bFZv zD5jaClbo3sCUSI2G0hwmII}2BV&>>9XPy%#a-{ZJ#mo`kYyNy-;<2Q^pw4OL=psMogfNk#ONwdc=rU(s z5hikURWZ#RUE|E_!bFbLgsYf2%1k)?-H5*-Pz33wI;aX#^y*vjNu%qxg^DQMQB)PB z=+$=(>OG+%O!pO4g(;#wFsKiOia4oxS5a|_sF`^eU4JB01nP12tm7(B(W{>rXMHME zMCzHMsz^oD=LYpfpyF|!f2pV{R1x)+L9GiFu~Nft+>BLb_eHWPfg)E8byPD~)5b|> z1d3q2QBX5jvj+67KoP7t1y#Y4KgjY;13E8IPGi+g5Mc)m?3#eB;Iu;;YuyI9;g~8v$lQoP Zan`j8+W#9TK6G>IICOpg!@;R<{{ZcD5e)zU diff --git a/KONTROL49/Preset.syx b/KONTROL49/Preset.syx deleted file mode 100644 index 162dcac517a5e3e3e38c7e1d754719565a06bacc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 780 zcmeH@%?g4*6ot>Y$O<7_iVFiFAq6$8U~v z&-^^k)x*--8mMZf!upOeu44+vqWvg{ZF3m0(Fog~r#(|z=|g=wmIboiYJJ%!*rYiy z0&59nHGqqW%vi3-RK&1MZ^D*Qp?(ef))$TwA_=&uAH$EZB_JlGJd!W4Gye$t4v@)e xI>-$mLece?a02Gd8RQ@kNI^@0K`k)p1DOHL8AJzMR1i#m`NdWKEkAjI*Bcy?9K-+s diff --git a/MPD32/Preset.syx b/MPD32/Preset.syx deleted file mode 100644 index 
f279ce0a37116abaf649433f36f22a71d1ca8fa8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 296309 zcma&vTXrKm3?Aw{XRXY|x|O87>3Sw}9y0U{A>R*zCCPSoZ|~i*T!mtRAPC|{@c;gQ z|KtDvKmYq{^Ho33=hxT&`k()M^R+pheLa6Y_s{FsW`FZ-Io-#%`|Wug-){E?z5sqM zXU_%nvC&+8yY07+@$I?nwhep*d~AT{VcTsz569crX1jR&dOtqS4SWOqwb|?-uss1N zR5tr*(9bGvKcJKzG?-(c0*pWSi@bH869 zuseqqm+tEQdO?+3d+cww0qj0%>_)4P^LY~Ra68O59S`S218)HL<=t%}aJbE`9xi(F z)g3%<0T+euy9v6Sdsv=dk8>K@KDy-{V0fMO^R@fOMC!ijZ%dEhc@Ow;TbdS++wRo( z-_q%N=WPzk`A{wQ<;Q*^uL)ZbmvUfE)DkEDRyFZ04|R&~CFVjq?$3jo{ZZ zJGi~}Sl;(zSa#P2J{1~|+eK%>A&|hpFK#ZfJ-;yPW8Y|mpX+7U`{cXLwZGk# zXYuu>ng1MqhSyc!28NeK`^9aKYxm?a=^TE3tlaRpoO=XKtV!#`W0EfXe45ksI=f{G zPhL4(AJ@~s`4WC^S{zREwVO$m`+WncL$`bhKNp5yVa$Bf<=N`8)L8DD>n`Ev%@4W7|a|FZBeVKIaZ-*0V z7X3E__U6z+=2HBeVL1@fgYQn~ZuNP3dVIT_S}mp1 z-_JY5DmnkmSQhAZk1F~0(Cy~B@bf-|&%i!U+RC@bSEg8fd)zPAmUWl#bGP-iMXYMI zc%@ps;f2jtw|osh7Z02=2ZPGCk z&(CFDotE*NN zl$+pxI}Xxai=WfNVKWCKOuRXezt8%1e4!D34qY#3QM7(U;@7on#HEWEzFQ7IKh7_9 zJh$k*8@NXAuxsYO7C+B^AEp?893$n@>f`#@U3vts;pg?T=^B9h^WpIz^{k#8TgY6) z&ou<6xz0YfkH(swfIeSemuAsx_*n~EIF4?ew>^R$YO3F6%i(A82sMiWn(uG>tTOe^ z-8zEd=gX9BCdB}~9qvFgEcfkpv!8p9i+6dh7 zru*#4^L#t>aNoku&5pM$2?e^@Psy>(wWTG}t@ye6eVxOyoATnhIn?8~UfOO$-*|2_ z*rS7C{DI1|+usJ5AhL++;O@|%_*%DW<}H~FCfC<}i1M_6jqWDdez)6g4~ERhoaiQ7 zicobCw}&&nc3+oEO`Of+>#N1;@__Kykr;hGeLkON7aANN+Itw2{eEN2w?hG09{cFL z+cU-bc-lN3%ahQgkR04NZJy5sCoCm{zxK=H02mK!pWEBHx&ClPKrK9X`~3-%9}je_ z{bNsszW8_b??#&)qUdm?0DW!m=Ih=|kI&8HK!4s*c29?|?Ky~C_N@*at~M#oE+|BQc-!u*bvEPrC=Wu1akRgLmC#$jE z)C14_&I7wVTSD$|rMPGseJ@`-pC;qUnCF4*oY;e&2nIsXIF4ztkZAbq`B=7x z$5;JB9<1Zx@L4$UfQHqjg?UoLaK;fA91$`NKGW*gVZ7}zfpy|5%%~&gQ}LPh&Xl4P z)jadyYw0z8xtxOYa@%F?Jnv6?ec0;560n{=+gk?sYqyY%^u2TX+}g(S4E=7`$J4(( zih1aHf8HKfBs$l~ELf^$_i1^Cen#PNUygTke0#yf`@w2QlT=H)hn2lBp%@k>Vk)(wK5vB2Ap+xhv_g$HFZJZ{X!h5W zP=7AosZZlQ`(wrc#+TdSvfVzb$vq)O_}%o%ze}r%2a8a+ZhHye8i5Ff>+`YMJ}8S1 zwmdn3B_XvjfE3s-3k*UAJf_LlGY=4JUYAF%TmcQdj|gR+-kFQ^kJm%n*F*cE!W_2L 
zmbC?H_|J!S#P?V&iDJOtUrP#qJoM-ZyFCWy;L23-5ck1+lQAtI`hokrvNuOty*+ICPX_6% zTGihDI8m~6)RjjuY_DbVNao%%q!#+A=*!>z2_dcjJt&;j#1+Rn8 zv>z!GJ0mN{^_~Ut*n;bt33vADT@SEk5i627a?Ne}NG%IBO9nMatI;JcGz~w}s@vUn zqCfnQE4y^=-W+Us0j#}ECTVU@{@PK-#uYYZx17n=#M*7?3iUwq*K@b6p6r{0zlbcv zTKS8f(+^y*d{gK+frm^Pask_u3#%vyQm4$6!=wg%7unt^j4~e+)RH`bj{^;As=4;| z5aoQ8ACuj`tOn$5Nua!TOwiAVw%e8zU;G?DUh5%R@k@T(drh^*8j~sZzTxVjdmg$> zYB1H&?$yC}Tqk;u*})rNEs*1{*A8jmMd zO;YtZlyI^L@H&1!IyffnvzDp@d7PHBoZo`XTrXnEnLyUr$HM8m98Ua#c?}%5(YlVT z!pV$}D8$Fh=5##oDcC0jejJwNU}c~Y(P_UZLdRCa9=yTk1^t$CY(Z{Ni^U4MIqiC2 zA6%SKg#$`a9<8F)8|Z$SEc38{LK`T_7+-$9Mb zN=gF-4G%pofDhadAFU2hq@*vjkY4WeiyPtepfsM2r+fJ$ZucYZ@_oDediZQ1weT-T z!J*^stzP%z;lA12)XN@Vr|`zmlHwlhhn@^SF^>@Erl67fbk+Z zeuhl!mKX4iy`2Y+y}lhcfM4_B=K~uLOz{^#G)nA9>ha3o-tuVnJjT-zj!O$0O4;U@ z$EKBV+!xME{6fXiw>?Js_Q141`1>iox^}X3ydKCqzK#>MM-R#|KEW5B^njIcH&m{N zECGY#j_z4YzITp(t}_#{P1{Vz6X4l7Uw(OP^Vs$RW-w!6YXjr_2Kr@)4|7nWfDh0) zUY|pB(fJYsyii9zhv=AtRgO`i$QGUwy_hQIYgLG7IzF2?#|=*>zIuU{0{bTD(oj#D z<0gvx3zYo+P&>yvZy~!SX;X^(3xN-1v-xQu6(bwV3W9eO_^1MGBD@R$YPWPab zHJz3CD3fFJwd@{RW-(lH(eGtEa=0^~n5u)yyTSL%j)59OB$JHq_;9xaT0humpsx!k5&*5RSgJYNJGt#8VA z&~dY6AD6{@-vzJWm2T>CIEqTOYrAtFm&MmIup?by$*;HjI);F^3=ZIGnJKxC%VOyI zhqj0l;iz#M1RrF1Jh%J%xnH*5!oFstw*cN`==bA=yz6;Q)JnE*%(MG4T}bzI<1Xq_ z8sLzBJ98y^m(TIOeY49w-FSQX(aXc(UjBODZD<0^J>B4u{CxR6FMqx7G6cionwBGc zf4uzlzO{qV0W1r@y!`dP$y@h$Ub^Sl?)~Mj_Z`zg&cS;;FI!sYpycBqz%eG2bYmcVRWN;X#GL02aa_Mvsh`*9gy{ieKg(;UT|B zfC%Up_S`H(3N9ArS1Z^1i*%0|4U-+3-S3%opTm>C-d}jz-G(r>CISBY@R*Z5Ma4l$ zqSp&QSRgdaJC>sPWqE{Mf8)PpFp}QJr_ju}k_KzOV|pmf18EG)2WdK-v#{Y_t$dKi zyxl~i`F9$spEVWmGm<{8udkkCv5_VXb!pR+^L7FJnqM3K&Ueh_IA!2>%T3tyYs{}Q z+wssQCh+Xv^`2?{IHO*Pi2>TApwoC7L*9Td$0yh9x?-`+`CmP@+Xt^N^bKCqF=FTz zZ7sCyF{1oLx|5f2);}M6iw6f)7K`2S<+NDB+>!NV1C*J29m0UtkmpHr&uA-aByM_r zH_T7+#`A0h?j5)cAS1UvfL7~wT4Mu;&~G=W^(zpGIlh-@LE-Gc zDQ`KcNjm3c+1C`jO}Q`cn^UhqH*w)X3@gp!v7;)480T0YD6X3|-wVIxb_C30o>h$V zeV2gWa`SQs#J=PD-(L2SAy%3j$EvY(`w~T>qPP~u%hduv5-w99U70cGQ30W?so% 
z;#cT&;{Wz(VTxKzR6mY}rSMy>C%|wAe&Lcn@w)P7>vzCeB99}BW;d8|xuO#DFJPG@ z8!qURBms{s_+^$iEj#N6ZyQe?yzi6j!Zl5ye%pY@}46rsF14-^OVh# z2nT=JRc20ld%CvpLnt_xEj=VU;X90xoG=*OB`jXkjoe6 zP5fUxhUKk)2zICKT7dW{&9NcPv1>;o-YCJL8(K{F?v3vyJ<2TE8rXO$Pcg~(;zzfh zUfq6}+bzJ=$Jwn!ltzT(#_Nbv_tPtN3zxiiWo>y4J={(+uy=a|_;x$p&X2nw#sxKn z(CFURA{neD@eH|txJetaEWS;Kh|CH-MIxTWjUM~v1u7t56Gh^cSW~8$6aWJ8T?QSd zk$UHF&DyxfXoA{T471z35E$>A9?QEGo7k(_7z9v4Xw1ZvSHlSQ-u>m#7M^%G;F8VT zte62V$%@Ljthz@g4DofiyM}ZtpBdih(eJS(GP|a@a7|3GOhe;$yPRQfCbaoyg?8t~ z2Q>>k+01RI!`}N=$5`ecuTAP>F9KtTBl_E4y_8R5tP*(9gxd`_0RI+*vNjozb1x~U z7#KoNkG~B>r=Be*R4+v@3+*Gp`BQQzwMq!iVZOJXl+?a|4e#z*I>dcB9RR1*r{><` zbjF4Z6a*dOLoe|c9p@;!!3$e*8MC?)wet9^n2t=Bax|42(JEvFOUTU!$9l3E-c&1Y z)`lAY{6@C?koo=FIQnz&z*n+uax-gpeh+Bg#r`{?c+r!H`PO zBBMlD#AwAnBOfc%A)=02F+$H5DLeaTBo}!lH;cDqEW7*DbXN(ZqS!-@EzD2bt3=qB zlo39^p1pN?CU42KltJjt;k%;|cRQZXeXc#Ziyy+=#W1p30q5zwwr)oZQ3%t~%eZsz z)X~FG^tzRSNKmo>rhS?M;yVdJ5Fi4FE>Ga$EFt?b&loaVejJB-qMf7)CRIZ=39Z>6iKy>8XL?_5 zf(bzD%*_Y1DESv@19&ld>ll!Ixe0*CMyXGWC$%EJj^q>hj0xCYcG)k`aH(!fsdRg* zvp%W)K1aBJ$QxPJ(NODlGMpZF5)sgjI^^?GpM*y71=la&?%r803<8!wVTuzbSH2(2 zz8kYdq%$p(;x}IL5HND*eBPfe1j)fLsm-*p1oFB77S5Ix0dxS$zIXxVUab%!lJ_KN zyZ|xzH^AI$=hpt71dY)m%i^v5a-8+MI!|JMkN0jt%kx=O7Eg=^7whlqeXUOK@A2O4 ze_;6n3}TN-=lBsmyYmKU7Utez0+)?Z1S*G`r0di`Q4%eGfWh_!Hi&xbK`B4N z>{jUU4ronIHpa7-7j!N^0wd>sNdF6oyM&4HJ(|f~5^0NDiRjRX4<-UJ9&BKT-NwJT zy{vS3;5lToCIHLK+T>QfGF(Roe*b$8$|L-IISWg?wVokj=htH9+cKH|5q@4T$HHmi z$wsNkCD%&Ca`5vb{9GW-oTs)9&rGDXkup1@%>%ZIZaZqhC0(m8@l=zAD2gC1<~hgx z=X+i`UUiAD&}wlaq8183oX(kDOa<3OwJboV&CtsC7YSViXySEl37#`JD!FVJ2$}5U zax;sCDKg#Km|5JWhgI2|tmgaOgGwiVsnLiVl`glTTv4mw2PJ7NA1JlrvcJR{3Vs5B zh2KEQUvDU_)QAi34L>sW6D2e1bRQ<*9?j5l^TO{3O1sB$XsIK94&iSkqq@GMM4j#q zmV)?(U!zoV^ZXbn&GDBQS2&loc9XPs+CeGy+BrlUNIhG+Ula5S(ZYI{=yW~UHCp|> z-Aemw6vXOf->3_NBSLX8$c=2VfpgSyL8^C-IFT#ih^F}dv2;Lxe=P;^7mm0~CY%)S z*v9@E1#uXok|w?OH;Q+v^Zq*cS->y6`oi`2&K>tjjq-gL_nwgK4Odx@E5r7nT*J@B zoL&*`^^R}cvB-dIYJ{KRdi)L7X4vy{wj6$b$1fAW!>rdAO2?j7*K$8*D*Zyf^ZB+3 
z`Pz)$U&GI@2_;;9lIOWjsKFbj=I3kpdGf|>&~Z*Kdgx%@<8Uaj*a5#LKS}e3tG^1p zj{5B7fTn}B4wdU{kna{J$~xaW%r6lZ@OEFg(rD-8e@(7Xz9WbGTk&(Acn$S^;yTgW z_qtIL6%>g7`^A>GPf~Us=l&Lcj+<}bdOy75+UVUod23IrwLbrXYY+7hm(6vz@bkBs z(z3H>3US%#+4~xY--cQrjuWL$f!&uEe9RP9{E5Q-3rZbF-QNa3-y~+elkfZ?N8139 zrwnGWrJV3L^7(7!#CFtD;j6(nIm+KSah>S(_L==H{448>L7tOh8Fwk&n?zm8 zt~4u#z>px5Rs4Fqo-W7R>3rnBi|3lrVaP|kU*6sj7z{qZi*TasUPepyf%sQSt;do( zfj}L;j5j#sipP^9g`Er6iq!6wM16PHeD>y#14}fEHo#=t#?>*~i$_NjF(rF_ILNtx zb9{Qk8EXg&MAu5MGT%4=hjck4(VkV9QGT4$xzTq0{g{8ZC_0GG!MT8+yLq$!wmgl8 zd-DgY_yt^K8!cZD^MaTZ5~j%X!&%+n{6^Jb9L%l`a`pzS!zS9lP9ID+BA^euMeP8oj-^6}MN zIuOCy25dT?g(iTTl(P5ZS~?{hd-o~*{W9Fvq6C0r;=Z|31mkWtd6~r*&ugagoEaO3 z@NKne;jl3x`y_0(7#R$+iDR#~{O;saCH-}Z!&@rhc!)hbI~cx&>y(?Bt8!_{2GU`%b19{Kt5pV^9N(!?pd#`$PDaDB;qv z)|Uu{31PVYiD{ zC&ryMzV(_z_|`@%f_f(BJYgY;x0dAA#g_1`)^<1Y)xIY}-)_D$Q48o2 z_u2B~(-~+i;oGgva9Ehn@}gs$_)=?0@H6}J{Jb4cy+38Y6hB|YX$tWWiz*R63|A7P z?jXLoJ9P)8;9w+|oYKF^jL$)yv%oOs)$*iyFI$6N1Js9lW>rvV-K`9h`=>;Qzln*vUtGiE01t z;L@_L6rF!{uy+{QL-OjNz*r4W_&XzL>(m&*4khi6;pcyi;2dtLp?~jS24#9Xj^Ss- z&&2YI-^X&=_s5dY0jq(1>*}$`GRZkD3aAKyj32a?+{2Dro&Ix;+sRNLjjqgr!OzF= zvl&RiZ;1o;yDw&pd9(nwuVLc=U_`}b+pT9$pH<*u%1>%cMhZWp zMwp*Zn&Qoc-fyr3_0(-EJn|{q>+Xra0p+q8wCx5KsiwsHpz|sG+=BLlw(sQZZH)U< zu|I{Mzth&WTI46Ic3q17srb3qrFYuCle44s`%~!~Z8v05 z|BY2Erv{74ImLA=Z431K6jQXKhr9I+_J|4kgOfvTi|Aye@bk)NKNg5V?anZ=jR`O^ z+hen%dRfUNp%zyQ^$f0NX7!?(x&;You~kiP4{RWmneBT_uzIBTXYl!^gA|7+cO7ZS z7bccvHQLd_#I*3b4EUZY=kRkay?>e|Msj+b&*A5FmRxt}{ytL#3E2_kM||rR>V`fo8hURf)kVuaha)y?`JKGHSLg*rSfFDB&*A4u(XgxK zlR`0wDNq`!uvZ#50EzXil-&$PGsFy)_5{S*a>-{l4s}~A33uA3?9KUW4$$-&^H3CT zRH=z?Z8ATHe7d$Uw^g?0pjBlRR&Nt(%*2(>rEkRG{+LLgYS8qtox{&rt3uUKdoGmY z;ap9iwy6<*{+rz3(-A;z;PCT{3dtpp_)QszbWegp_Kvmol}*J_uX?36%r*wjVV)z~ znH@M5@Q-1RXs5GUOkzBk4Fio!`1#d$BPLUkGF;t_?UgCpS%~{f_*rFzAFqCYTkr2y zK>@ItX}IUBH@1_$6i$<2HwVRQ$Fwqlv-QuhRK4L6ocVq(;pdAGR5S z@s3o-#NJ`{)+R5#TVG9)d0W4b0AX{Igl;-c$Y zK>BE}_|eYhqY@dhW7)IEb0a4#8!CTu^y<{Uv%i!9YLs2g= zOAUi7y(8PKI;dx>EMTj+)Bp(#$Ik=?+mK6v@6?J#VGS&4>_;0^@CNuQt8KiexBR#g 
z;Bc16a2_ZmqC}x|{!TBtwvk01@DZW5$s|a^9#>q*W=q#zo^3tQ7CbK(;9v}S(XT-!bwD$M2qA4DR_Wm}s56O$`p5j>a8QLr` z(wAyz6C@o&n6#1E6!}C zawv&;GXD>%4;+U7Kpw^*rah1CC$yau&IZxUEhQ<#dpMJ)JeYGt+r7pz0X$QW*1=dO zE5}1dw7q%KaGH_chBuRy>#j%CylLO23{SIu@4resyMmSzKE&q_iSx~G-P{{mj7s}zD;Z8E_9NHdDEZg(O z{9IEVGu+Z9*P;DwG&>k_NI}7xDdxqmlQpB+i_PJl;pXP7my(B(b~4D4v*dS{aM*Wc zHGaDQuU$!Ocs%$Ta;X~48}|(NZ!`LR5?|(^WVm01%JBRg^1``lhX77&qjsCfSxZT) z2<$`=F0a)$3vRKCB<=Xd$M{>D;3bQb>Do(4_85~B<<%PNM&}|m&S<5jJ5~wK_Tm-VMt@u|%iDfKRZy-1)M4N?aXr!;!UJb+<*f=ND z4dcWtl9A_GpGVOlc+2D0JsU5Zmh8#`mPnEnQ`sOd2QZZmSF#hJTdvd%S+vp%<&n{q zoYo!jy;gni%srTlm z-6B^b!5{wBp@yZhkKAsf{K`rCk@8J#=c9@8Vtxqx=>Z{I<4cZ{E#H;$?46mKa^`9$ zF)lF^_~Q-B-G)r6S@NKsQFK*siO=7HW%o$896ekXbqRruxXw%M%^;p0*#k1S#(HB! zqiHKi@&kD_4kQxlyNGy&s;oWH!#Sa%_+zKmxDPc4l0^x_Jq#7E|)DFK|JItP>KWK;i;?SWTaxiqu z8P^)yb-@`EfXNl)YBJ1~Nr6Z|?IRq4gY4Ipcflx`b92^_$aXZM=X0XEH0P#N`eej5 z8j;(D#y8z!)nTGAw5C7Gq$zVzkk*%`wMZyU0i;yTQ{bKlt$hsQlz>@)C1-k&@nNQ?769^ zJv07#QR$FeQu1PMP7u`(aW;1J>}nsOhva?j&bE>~_?}eOv=WT2*;<&EWyO}UV)0B% zkw5rGFw9g^EOc&-W-YsOH<2GEhB{XEYY8~=B$yDUgW1g%9-J)12G}$%@EUNkFUh^F z+7#yPQdE8d@Z_ssSgF*?-r%^D6&GdKKB+0vYfsplVQ}khJBuP&FEk9UIOjA1Yr)?n znWL4+C9#Ts4GIl)42msVs9|%wyglt^$>;CRAmQM+n=({gJ7`fj3)`>bhE$Ib&d!Y} z1wB;9mS?tshU0?m+qfB|tXAt#4Yn6&1Uy?B<7OSNahpTswZqO?Qp+-tC;N7y7xlPt zs%4nxP_?`h&}m31hhFYhw3nVdC9nKA@F0|CK@SU zk_mNH-WxCn#Bbv^^h{!!Bs7^ATXdYefcZ5GI{7R=oIK`AGblr-s7waf2#M^9iA0F zq_jviO1BQnH43&xf>;R)&h5-e{QqD}RTqE+|@_;e_ArA-*m73tj=2ogan4zt^_NszqLs@k%mYxL_3at)4U2GS?y*r${2BOhV~gySNHh*;ur& zhbZ18`qiWW9#BeY5~gYKc%y|1^mF%j)gu3xa6Ma#78iV~97m@-D(_inOu~GlMFUq! 
zA(>mL)leHuiP4)$_X{m%9&y5jpWJZ44XSFcs|DM(cTw7Ll6D!oapb+(GOLpWTV6I} z9p0-hqUG>{E$N2{dfxm>1-rHj9NA{&Gj*cVC2hu?j)FT%O@_e~;%K1-nntHil&*ra z=28ubMpCvfWnWsZHWEsRt2m=bnGHH9T<)!t-BKlSlZ*`KQpz*K zvS)W{cUe?oP^oV^(V$jWlBQC_u#|Xj62?6zB12Iuac$H7XxZMik_j=-0(NeT?1LsV z6rLqAK^z3KN-B2hlj`hKM}6A#A<{dR?!DTwMBOyd=Bu+}SRrtI*<>@Jhy;VPeF>zD zJqytjtrQX0_8Co^tffr~hMLcB{$Mou;U}DVAd0J*b#GMt)C9-+JVW7jgK3#@6e@>D ze)AKqz~Sa>Xp4xfcBbAJb-b^2!OJ+(2#Rl--*owg1)|Q(L>GJH{;d?O)L8lXhf+iwOHIj4hyCOGZ83?&w zYx%e3dsrDtt^xM*Z7r|1d-+y!u&qgW0FAdJbf8E3{jnV6u`cY)*%Dwu;V}#=Raafa z$nnh96jd+|@NY`MJ2`vuFH3CARYtDsXCbqabEQUX&7lf?(sm_h)~ZR3iusro-)TF! zH{_f<=E=n^$m4iD=aLg|_eHT!CY2B$4r5&kJqe z$yvV95}1JFF^^UBfwx zVLcr0BkDZ^VBg&6!*f2jj~*WZUISpi3A~F9GTpKQ-mzA)oKO>LyjIh94(+pnB^?5O z$phswnF`;WP`&f-aM}co-vr)f9p0s7Ix7+IVDLY)%-qv^+5!IE^W}Qaz|I;xh)#}K zyuP%jqoG{`E?p)3aB}L!D)2ses@VvoI_K;lr4=<^WTSPhl5%yK^=T=h4S@ICDK`Hg zDcd6~iT8a_NXpe^R{F)@y0%slI4`bSIc?PkD;$^j?M#3W(*rz~Q(2*_7yr|7Kix~I zU20+dxq6x*t*y~H+mcN^zE!lQl2Ti+_fa&j%I3{uN|ubt0A80t3c*|&d*$o%K^EtH zH1NBS{+RHb)Gb;Q{K84^7#2f)c;ILC?8>S8SID$CdggeduxLw&wg0I57Ba11Y^s5m(m*E! 
z9nPGenLJCjNzA~^=aAab-(G#$#OBUF6ry%d9L}j4@WS@;Bg__`>(?Ot;hd@gkLH0f z{sZgeT$3+~Fu=WCc;{r}zx&C4@n_5ky9&pEo1=cPiFceN?5zbV&ksx=aD}Vf_1BhP9}${{#DW z6VUi8pQ(O6iBBo1B<3~Tw5 zlFWWmx3d3ZxTIcFuHL0|q)H_RP>-OX#JF6wf!xFos)R)Yps`SPL%KF1@r)lA5^W2B z3QAnVbwyOC9@+F^7vg<^9i*4!@8;kPS5qXAN$9_JFI0VdLD%_T0gp#s4<6>0(JEp- zUR$3*>iwr4>3Bl|8-8AGVTzCc3j3MrSUEd?Zh4^hXy#8DpMT<9C9`C6jpDrZvOnWI z6PZ(;FVH);2eH_5$X4DbEb@S@mK2BgGHyAB^o>B zpQP)Ox=Pjs_(iJk-|C%x2BhT10RItQyBwaiH_wmI21&x8= z(#&6c^kRE$T)Km$kL$c5kk8(p8SxYT(13mOCX*)t`ZN&X4^n85$w=OSt66Up#S%YUNs zc@TA0Lnu@FUv!@JEK<<%FE|f!PB(3G7XL}-6^+Q}-)WqygLIeHMRPL2U(gu9Gb6r^ zMH`p?{5j`fiMl_74WiD{MngOK2c4%9sBD;@7yo|$Hu$+6?QuN*rGtacv(`!K_5G91 zD;ocapIehD&%$4*@i7O>v+ytAxuK^#3x5F~{M@@>%d_xLejeVCeR&rCMd#kVy)Vzg zpV4T3o-O|;exAVpiJvE({}VqC53iK8zw*ZD%`7rf*3zFlh;O`l7NE%iQTB;tN%l`~ zGyZ8IPBF$(&IFd{PB1zflY-Bf0Gw3Ltcj_Um#V4IvjF#jDEzD%U=|=p?OL)|7_8>~ zOIW8ST<2p_IR?H3@u!d8sK@mA0c<_2S@wFg1(#zOe*p96`N?NW&*ghZ2*0ZQPcHoJ zrw?GUx#364KOegePOhlx&%58QTqbamB>(l($2>~__z%Es_&ZbQKLB@*|5w!m`WG6l zcYP^i_%G0~RIROw={E2pP1DiD>S*wXm7v5}m`J+ROu$oT96(Dlvd9CDeG zo%i@v^?=5}j-f52O8v-Sk89iE)W(+sjVi_5x3i)8<7aI-{QS$0m%sjN$()oipd0QK zeN(waIbBC_+o55T=)-6q-r{DxuSnoh3&b^THMZvwpi zJI?KY_^Nt99|->PM>`&!;`bkL?jW{`0>9%NiLw6IK3_5G0JEk4M(5tJPDcF)G`a$g zq|W~U+)klv75t_9-!65b3nwEE;O~R-@ppT@q&fyTtS*L#PaHzRdULclxyr5TnfTks z(xY?=;Nh|O_U^YoTXPn6D>Me%|LL=H7Mvsw_)kFddT*hE*?$1;BLuk_3-}GeU%pN{ z*KscHue|%)w<$&MgP;FSx<2+W%i6z5*D1l3*9?o<@85s7y;c>r{sF4~>9DFK(*6h1 zbr4rl!G9p#lpA&Q2&6eM{`}vp0-Rtk&>^%ocWM}p+m{7L8I2piko0{CBa?(G2d^tAjBI(O{AL@(gs(%i{mmMmJ1&3A{~eOM@R zC;$)B>U$qwe@~q4v961MrE`b3)PxW4Kj^%+{Ga%Fm|*e^2O2K~2Uef_+={EDjepa* z3j-v$sHuO@xi>~7T_V80`Gy_fE8i`lqICqr&pE05Z$G`GKt4!X4)Ol--;~dFtku8h zJZHD^&A%e>$kgHh_Cy0i&eRNFgZ7T0PhgjF=Pr zd(#{fCoQK#HXzoFi3c&S&)Z^@dSh?d795zsWwX9s|A|i7R#MFyjnA8(54BO_fSdmS z+*ON|IsXs9y-aLsm)74<$i3csv#DKL{{rru2gcfeK%)+zOI#)L`PK3~YvGhLc5(`i zG9}X1bDoWJM0`=b(%NZTv?HIoaGR>k0ak2Z;xH{k7}*GC=TOddN&d6~Zc5x2`Il49 zRam5&JL*V#xxKNY&AEW`^QEGT_sV%3VQM?5GvaZ)! 
zWvkYr14w14I1|FPSwd7HM;%n{ATJHSqa1k4RTqyr}VQfYY(pn0AC*!hE6e!5c+`_c1-q#JOZM z{utmFXV^B9RH_;jTR4MrAKmlTQF}#n*!bcVMY|d}f$Zu|wL!QG8$xPuNjK7YWojrF z@Jc#fmC<6v(0pv}=oTyvi|Tnwr!}?rgLJZfi({Vcn>+{^YogdE<$33n&+b1Q6JV&c zvp-ocT6Xfyt6gOp-`C`kLMB@w2aZGuU0}*tYuVeu=rU53&WKR0 zLkU!N@C}aIwqJR0yRG;kt^IV_f|%W3-`Y?Y^WscQgO;=KGH7E{MKaRv&V(69_d&QU zYzXK4+ZV$1S0UVGB;imm91DY&y_eis5&jBxK{x}iqLA&wEmGhh+DNilH*HM0E2a6_ zhRwv6Q9~gJCp2kOyq(5D*x@X5#k8**%c~*|4S98*I%>MYOyK)R}*2I&M*UPxr(jm#v?nixQPg;KU81r=nmw`@qZ z?)uyx+8b$)*OZvmZqA~T#MkZRSfXqqoO6T#q~~pF3-V|;HQZnO*G(>5P%o#>0aoB< z9pIm!)adX<&N2+G{|wp*@*F*Q&B>&75dASP|I_a^`9My@b=|D~QX!m@9t(srs5YCy52WNHVT-V+wuN zS=Ft|HMV?fe_P_+eHZ5gksaS3XbV{l_yCVsV*-;gX_Bg|ZZ@^e(xV8RnsLQg40*uO z<#NE*J3y|H_~s6+YAnmN1;9k44UL9`;-3(<7~w9*x|I>{?+!LQ@EmZqgQcZxKTJeE zExN=7>bt)?yH42$z;(mT{FwTRP1h$1Ngw9XZvGhAM4*B>4dZAQ3B)~Y)=ZNLzw{@?s zRK+<0XHayLS4AZWU&boz4OWSB4}v5f9Sad)}VHw(mHBj1++FJv#LRza$Bud^zNoq->9vp?n1I# z*z^Jf=COkTBWWF&Q3m5EXDFG1^FbMc+{zOMIWy2gM_Vjqzn*~HIOK4n9GV*SqvUU% zsJapLgqFwgOT`&Mj;i21&C2S)hVhBn7gYx*b4gW=n($B(zZ5{Ai=MWl#;fD3zXUc{p(Lh?1k#X-~agO?9R<5 zr$GJdr>j*^N&W+HC8uZOMg-aXp>bOOofJq!2y)mT9%u4Yp_~DT5VknJWOAJzN&(}e zPc>+Bsm~Kwa&&Q)JOLSFBi};Bip8fI)6F>y+EOFPOefeNE2F}`Y!aHXl5frgHUc<) zgvOnA&C6j)z_3Sw0oz(^xNsQS@?`jvCI!s#p+o#jIpd9=gON$u)tXAoSj&}cjjyZ% zkHgfh+6&B=V0?s+(lLg?HRxp)*|)i`1po4 zI8HOB!xz8RlGk}q%3b0U`hV3df^-)1H-*j}8|XVEA+Hp$g^F77{bc}cSlin*(Pt6K z0tpAIH&Ir~#-M85_Q`-9Y3MfY*qPajsq-wf>?8_qvUYsF<+7FcInyz-A9sl2N1e)H>?L%8CGQw_}nGUOFrB zf(OGIJe#^2IwRIgEzSn@?31)W)>(#JGLO?ON1Ii?8*taB-_Qz8ECU8)yUZt0Unj-$ zI;V1G5wi;?o%w5Fpb#o}QBji?4Qg%86<3JXNNL`=-QLOcjv~aEh>S3M>CwoygAlXNqe5aZ}uk z?7l~$ng@DeusbVBxCwB*Z25EhT|ObZn6z9k`*Zts?0bS4@uzzk?)iQCkWfmdG|qKS zadiKzF=}-Crp6z&g+^4KSY^QHHtZZ>MkG3Xqy)qxj!{H&<^B?qeex-j6tLU83r2c8 zP-bk3>!q@Ckn6d?7Yz9KxmD9C1H(7hILDtZ1K@-Ao81YJw7VM)mrw&=N5B1$R?y-_ zVRV#y;Nutq?r?B2eUQGA4GZoFzx%1h3vvMt@I$t1S3TXH%(U4{Jaz@PX4;HrtH_=eTSgeSqA-OFY%n@9T{ z*2fxv$ICw2e^mR*vDhKV(>_SYL!)IURq^`S9N2MCJAPdH6C8OZa)k z)q!e!K`OL-A~SvKOYt*>vg!;HbKj7uJAK?;9lHkS1*`9(gMD@{o@JA~?KRP1XgDAF 
zdy5T5GUs5TAyk!D0!;9|wdo{dljq~{;Z~5F3D|Dr1pmE~>0yvVbGc?I<>#LzyL3T` zoZ;v7{56$xSd{Iaa6q$mq6hq7!xv|?#bW?scVD8Ns8+>J6zDi7`zRd2FiCAYXVVtj zI0Xx^S(Ummp+J^7e9=@^N6_pb;~O}>f{zIwp0KUkPVw`enLRdPqw)E6oq8>xLZtpZNZ zwV|``e9O5eTo<%D*dy%-@H>v$jM)rqyop+khHy4cDoBPAs2n{kV@9(GOsj|+RW1gV zvm2wt>xUO0*_$Q}8&_K*vZZBnoqBv;22X+REa25cRb6I2lU4Ue3sBj!}Tw z+v2GJ{aiffdNzx)>`M3lZSjojBJEo*+Y4h)pq77GoIl#1brg2d{r|i;$JUfn(j1?% zR9Y^(IJ%Y=f9j!|j>ITp!;o7w;IY`J9zBzp@D=$i zlk>=U)O43Bi0e`Z^c=~@Og_iqi$LNHot}z|Q0@nTh`o0L%?kfD_5MYmDGbFu{zaf1 zI5tN6m|1mv;*d9$DFoWRIccY@am=h51Tye%DpMVsq(D!U|8omNlhII6nzZ$%etA5_ zJe)%im4C%Kv&!Nn45A5>g~!njFx@Mz9gMkrUO{EMNzg>sZ`d(E{c%IRQS~0T02=3mV;&4Ejb(`fE2lJx?7*C=(xF{L5*&{ z`Nk?aA(N0Tlh$%vi$O}372iyUezw`@ic@oxrO*|eNu;fWuOtb<#~#XB6{3q7J~_n% zV3tjfBY4t`ERWAV9Z2~dEyp-WnUQC52Wl+~7%eMZa;IIDOOAB6R2?GVfkhFI>zv)A z-POz925HK{bGf)I`9yHgQvC^-d)HP8YNbl)u;gF~gi31p3EatP<;qTa1S&-Xrtw(& zcmQa?4g_#ASguqD6(m3@rdcV-I;Dnb5y`Sdidrh4PJ<}bHLkB1$m*oXTv32O1Q~|- zF-J|yeUAzn&Ex4|->SHvz^(B+_7O8-@)eUQI*h!b6a9bQ>VbZN^}e=Je)=D8Nn z+=yMW0bUEUQy|D{bgttbrXX*D;BY3uuLbpe4Q0l*_@1VmGnY);-CR(AGeD^&ZTG3o z84bh=m-YTFiuQi=!_2x+lPC*#v@9m=hyC{g@5I^^(Zd+!kFg1^3paf(2x|HbA;uvZ zG_nXHSt4JbcG_rtR@72VOUvUxhDK&Q_%3|bmg5E=zNsO6UYNd+n}_ z0FnZZVM)gCUJW(r-F}PERNr#`D&Xf)TK%iOY)Z<`(dWRB;?}^~$~ay+UOC?)?7{!F z<$F9UaofOEKT4^38u`!H_Zz!FgW?s{K_Vvh+S}{-yJF!O{Tvh9Ehh+->UAjxVZ`M* zP^oNP(ifgs3sCmIwW6=MS%e4$TD1u`oF^b`e6vFYVCt~MU(;KO6lDQyaE9|nT1o*k z*6Ex)mI;s~+`v}! 
z&tVec+M}l!0W$iy`0ca=za4a4zoQD|Zz7=r>%uadxbW*D$DAnN&A+FAcmEy+p4i!3 zuY)YU0S3O?kYeL!f99&-!cHQjnln2Ygk^g0Y-cxW_6TK#GX{c7j_NZfd`ZFUJP{F4 z!vi`|yl~*gUH@~|?iS~nrv=B;pN~pB_5m%h1DqRuGyZN?X#WizmKv8~5 zA$wcP$QC~^G`PWog!aYd_A*l}H>{_o#<_5Io=Nzz7zu+`5Bwe8-9Jna_r<^{k>J ze|okYM|m&y@@%imIisncQx;k~h*%wrNyU6)$ZGU&DHaRa0-m0>T0Z!(EP39v7f(5LS zx$jg2?Bx{p${~mgsj8x6H~qysE^b~deJP9`kBom<@ZObb38J)|qJn2!m&1modSo-Z z2UObhc^1PPY9|pF;N0M{jq?`v*jB%`WYV^pkh@xPNAd<>pqx-{=SNFY1=6a7f%-1` z(!il-ZsZm;!nU`10G#Aj&K8E_kisG%K_fk-+62^RrR;d%VA;&W9KGFgN@$xn)wY`e zy+?ID7^idfw%%+WUxa7bzI9_Dla{;vYWXtQ)~w`i%MbBY#oK_b>v7SB7Y$`-fP(G9 z^-&}1f#^8gzKtQ%U|jWSRas&crSneK*~EM$lg*EDqEKcPp&R|2Od=4Poy2smt|8wOP2#vW!TVs{1@=h`SStVM=7M|2TAK3thHJ?NS z=b8&VB_vLyS7`hGFVBPl&8381|MFASH867psra?pnxhEHaU?>unoW%B)*UBdT`{5VqRnIlacTM+C=drSOD{k^v@Z+4Yw zKT4g|OOMH{50G^9V{8-OI>3lrHC%Om`v9(eeL#|^qbGT&ENR=e92Z-LjTUrIus+m^ z77rmPv+=R{2A0Q4O3AIUiV~XPwfsBXh8nK$3^pH{NKW+(V?>F$(^VtT?Dvoo-j!%X zodEN9UlY@irlJ(QNg(m&@yHns51X5a`Vkkh6n(Oyy3hmBLM z4vS4={2G6KZ2$pz0vrl59cvd8d5c4wVtu!I?*@K*a)* zI&u=f;nOrUcSCd^Dy^KqO-$L$civAO<)iF1$M}RuoOah5rz)pt`xGksXJ=J{YhB~P z?9D6fMk#!*!O81XNA+n^_{3A4Q0}M*z(7p|%3!)>DGhd#cErKI()P2nkMTI*x;#MG zfdL744J#(Y3lSzdqhzt^p9twGTp&DJHmdrkVfvMSaL`Q5poNsWU_#|ZNULE^k?d~F znCLGb<+eN-3=D)E?HE_>Pj;gR^?I7su3K5s3JdSZ0P#t=%t**Huz6!}VwFCAiWOln zmy-}${`V}ooEo{p|C$NGA5^7}we40y0bK8;B?seR*hi6#Gni2X#mTL>{xb$hTb(-9 zr=-Y2ASaGyB=Y=Q&k>pvC^$!HfJ_4?DQXUzk}c*hu4l^k6gJq~l7;LCQ~8J>>G zl~HU-fc>Z6%+XD4zGh}sh&j6yiM-z7wRzC zH(qc97iWrHADGF?=JoA^7l;O&bRi9Kr$=J&)Z1sT6>~DuUD&yqJ+28thPs-w3hj&; z02R0=KWvtv5qFsZ7CYdIS;#>A0F8gC;u&AZF3rd{H*k*Vje=(;SKFIv5qF2R%16O< z5RZ#N9$dwam{T#5173OudEOvLR!N9HW_^=0A5EWDIe{aMjkRvX?Gpj+)wjALp=QXY zGVTZfuGe&BVpdWmGMS`^02y!W&C`4|fdBORyw8|Xl8Rp~p@3hncl(*!v==y51KxQ9EO5gn8nCMrFLV8q_P_3fsq^_ zv)sHK6Zu>VcT!F9XP8+aWoK)}lePR}(>TBtf4$ez_FOpk3wSrk;-{)BDsPNhPl3~My zLtE|0v6*&N9-X~l8K(80j!wwn6h^ilyFMR?_GnjVwrzNnKgV}K_$6EJtU6<$$N4~R z9tD~lG!}x|pijzVR`lN;%QX7X;Nnb&*PD-U-aBy1gE}8bkh_+N*GaBcH5)^n1e)0Q 
zTyx1LEl2zHdI^L+tq7%Ik+qUZHkNuh5*cOsWcKi*U^6z2Gc$h7`*?nQKb=tlLng`B zHdGt>#^Sz}^PHJ$Z~J;$V1-j8l4l+um$RtGmf9Vj3B}}pyj)lm+nV`2yet~wW^)gB zWE@WSv*a@brt}waH_x~kvE;@LHHm!6K=&!88g{{O4Qkvzn|qiY!)y3-?Wa`AjNNQb!#C))v7yr#EJ>2@W$m--R@ z!ZfeHIw@p(9pTn-a)$vakLULXYh%6~Gc0FLy|ofI%xF^rPrtq#41$>!v+RTAAEhMu zw9<_AE}HbTK~IS;My+nGR0dzV^TzdpdhMqazyvHd84y!G3APRHd-kL=MfaVQhx~vC z-3BV&!5M+XCs~lSyMq*|sVd@`=wh-x45g4>Sw^+AMnNkbVUEGrb0otvL&gS=N3I^h&+b#N@kZ`*=` z>5+8Y%;mlq6kO5B!Lha4RuGg5q-_rnJh;Y#lv7_C=Om@!I9bjwL?-^Wtlaa?E@nmT z0?fE!Y)@-uOR?kOix%677Hy<3O#oXjE9nKBM}cmzSddc6bb4($Nokmcb@;-T!@sb- zspmnoQas>H7p#M02aaLeK1hxy+0ts|x}IcRpRUXO#<&?oS0$1}mX{-;W%PTOb@)bP z&%Tli1o24nDU*SA+G&VeN30p;w>{+$rT9x0w?uS$3Hi`1r@l`0S`L#*AoF$@4#|^f zdKk6_PQufoqrfgd!E7;crzj!+phog(JkO_QZ_ z7sVa{$IdsYPWP()ayevZRViUEVQgzbdmUv$M~q?=^&#Nvu&5dm4@eF`$mpURd4pU_ zr1?GZZY(&s{^b*Zby(ZEN(~%oj7S zIZ##0Xj;T?H;p})W-WnRzDf?f?V+k3i0ttG`)6r9mz&1sxU23NQv1)t28Rs~asG?5 ztaB_ap_cbZfyh2vuK?djamj^Lq?`qfFAsGzhBwM1F!C~5^^zOGj3U-gx6=Ezj|yx_ zYRwLlw0L=L*rHzQyI5TZd8#ZfN%W&a<9iLkPFlCnur;DH!~`GT;$zT$-0ivUd!2{Q zqq!@?q%J1}PENP4_Y!B(NNvehubVTjkya=87Kw(N4rEyxLv)F=2+F*fxRFOmsT#i6 zo2}A_N%zg)m1|?P6|c$~d8;#-eG<&1aSK?7wPl{zD}S+9p2J7JW|2A7xl(E?1-It@ z$+Jhd92U9N>EwbZrB9rH}fd8TL<@Ze}$ zjMhv8$9NJ9L5oMYwDqPd^bMdnvEpcxpR|ZCD{6pMt|el6x~3E5Jj)=&#lGAWmyT0B zZ}i*3Lx2pNk+mn4q!2viyPawBfVbB zyd0)OCXR8(!io;mZNAyI_`nvHX`Y!B;hn5V8KU_c*(1I?c&3U3;2~5P5g=_;i{6uK z^}q(eToUk!F>}+zrPNWC0r{TAoaB_tU}^6HEbJe^#jnxEqZ`P6H2_ba6pn#RZwQh@ z0@%{cv7ZIp;Br}3Nka}klVdpeI^eLBj}HA>RMpDSoeSo*id2p6q_%{0vU+6ltj5@w zxPK0`D4jrk=*p>-Pt&$u5HD5y8we@LfFAaUt6VOih7x1ktLfBT5}_F0NSiL%b z*$;igel9I#tQMiGD~0`}uCATW!G1a(S!p3aP-vE68w`xYl94u~S>~7DvG8@_6(@NQ zjg&8qBjlP5{e>Z4^hdYwM0bk=b8!DPo`lh@mx_V=Te&#L!pD!=v@T}RmU;Aj)H?5f zdrpwaCejl^3DQLfy3}ZR47$qgUG|<>CsnJQk3%UE0 z`${U;@^qtq;<0QA14bm}ACp*Q2=FYFqRLimJ(m*UAP%1FZ>sS{!R#1I6!B#JMhseR zZb^=IUU8LxnK4uQXvqfhhYaC$7<)dQ@%5%oh1p_P6h!NpBzcN5wa}gmB%qP#FS7^w;aBI10{K~ z?>LF|_9A5XJlw9zjR;y)#)v+uS1;sPWS4} z)pih2cGGedLpuF3(8Fy~)kg%<)b1(voG<+Gj+3>!cO{8)1OBZ|Wm{Ihm?cymmDud* 
zlXPY*^n)*=bhW(Sh|n79@I<_uFP=yc1w){YC1Te3;Cs#0XdK%9c*EqmD{HR@C5g>L zr1AiUA6{Ku=S*e$%JX@|xzPMzIGP`l3*B>Wi4`;XLCw_%kC+|JM9CX(^vM=HxPVpJ zGSkHIC4t*DHKx85&oq4+c&924JeOE8pPV2aP;zK?qOJ3KXeYwsL1Ue}HIqMpQ}L`f zo+TYCVt^P>qdG`9?66f)0NggehOkmN z=uqrZ>qYU0=*_&sTw_|c9wfGAIIzk~K4e0UIT%SFCg@!CWPOA=SJ}P-zN1rP$C9~3 z|BUh0vx*}i6NPqd$J6DVohJvU^5_f+@&KZ%B4o9az1TUbfSuxzV<#nni%m%G0uC41 z)7?wdckXS0ByI2;aOBlk)n1RR=_;ArgZtyB(>`U7|Lsj2u^QU&kT<4-4fMM>B8S@y zvpbi58PSC3rDyPWIL_u2^Frch92&`!24KHzIJ5B49_BsI0^5ncF>qNE(o$a0MaMOUD?C z)QnACV5^CG9N5b5H9_IbNuGOC3qNG_GX(}9vw9||0HoM>SdQdO2bIE{SJ%aNz-ywz|dZgv|7JngL>xi~s z0LJe{yxSj$*Qda251Alzdn)pui8m?^)7>*clsbLI41<@X8FeEQ!V#~qz*9i)_%*&v zYXrvZ8FLL@2g;AbH|+3@SN!#8g>>RbF1!A*y#}vWI|<}`K00c0GOpr>s8Sk!wP|2fK+sz3?>up!)5mBVd~=KH{nm!Q{%oT*jwhTQ z)jJK1HeQdVpYyTa2SdyPe&Hc-^7FB=^VV83+f#$}=aa%-X)1C!eZN+r= zns}wk3Nx3joj0la3PyC{Hje_1cgxnC@me*|-d zPOd<z%HH?Wl>#dE5ce`Wo zi;aMMoP7$R@&SOk!ew+S9S}COb-T=Y#n@K=dhev+do}!)*`%l@3F{n$MAF=94 zHzl*FYvEM^6db67VV6ExJd0+gmP#TZ#r;XdgKtSx<;|lcGuXJ63$xn&G+3G<^v=@8 zV6eCT&FGJGAN-s^H&(#HNg|2w!cNC{AWbF0wTjMjP09g_qIOAEJ43CL^0#L0z)sXl z@SxhEtyVk4_Q1K1yO5d)IDnOla0M*6pDMMe+ zwWSy*8!aw=49*55{>+81hit+g{Iln*Mu$5BY1yy3op?bBA)Yp;opjlKr$PZvy-!oj zucWmYE21mL3dK-oQpl-WCYB7awmXHRyeMBm7R*1hfH6n=aMIbG{xszpP8Lcp<4OQ*H zsM=Peb47&=aR$UZ;Ic}F6Z%h6SfyXYvM0GfKmFS)_PZ9IRr*SY;-Vd$8Dn{n;WNil zM6_`(L1Yf6VDH0fGq9}guH)L%ovs26qZeG!8h=2kiue(w522hWbF;YWK7#0586>l} z)@!U^V@f^{lk3iS^utX``l=9}hc%yR8`N*fOFogLU!Bxrdxp>V0D}S%6{cXW~C- zarV%lb9~2*?{wP7PjA_GL!(9O6^-z7YX|rk3L~Nv9n4q;9o~;#9jeMi)8d2Y#_$Y; zIuBN~S>pGZ(~N(9#D~h!Wh>AUe$J+v_h8XH;xYzTrsa)yoUTO0?B5Y5 zm2a%lZ!6?^WrVk|2!DR+aEu2k%wjjqjdL^J!5Utj0Xzb_DKl_8NpeRH)jg8MERj)E z_E|Ol zmQSD>>5A;*6yR~wkmlyvBA;b9QrQ$xh|LzSLTO^-A^GrvwI_jnS<5&8I%B?;TAE68 zaHWO51Fdgx*LvI?1v@M{U2N~2I!1; zF7kjKO5Qjp9hiE|9k)IZWZwBfJ(C7zD%i4xd;Ww9Q?C<$1x3?x`mgTg2P%WkNSLYC zEoUk?xrW1Su(*9hyfJl!92T>sB=pv{>`cjz&CoWniRN4!OjV*1^dW3FL8I*RgY^ZE4bw)ePK zK13{4Dg?8Nkttz$wEwz7>F*Aru4jL369VI%T{0k<*M8Q{L=6xAIsT>a7yVp4U^F+d 
z67g&A{IBgB-8roy%13LBqj==R+P<{(<7O*O;~akGPT>6|Q&i%W$0;%@t1DG`PbxCG z+Uq<<0qk-GxZ`KT>3EBjuZK?-LOX=VGRQlXmP3y{j%KT3Ec;U^1ySl^*;@&wxQ++N z*i)eI6y@lthw+z@bdV_4wUW#OPLqt1^NHfKzB9IS`(e)tNun}_;Cy1_E7yge-(H>i zNJ*V^ctXZCZA++aYJ`tqY~^Lh^^57w;cD2HFuuy~8KjGOjX8DCCTiN`-YyS42pCl) zkGRvj;8~l1OLdX7olMTy+tiu;>ru;POVRjxEUg2mCV(AuKtsBE97>1YIb69#I_Sub zaBfB~U84D&WDLerl8ucWqK;F>C;(JBTt{muuLmll#`2hl=2`Sv5Y6tcI3+ma4iWLkU}OLtriaI3r#7P$Iffgg`bPf)f)&nfL#FICv0bYClG4{ z0kID(=;P(d`+lFgomV3p2k@N7ABddi6y_?JbPhikZ_N2|nLybhTzA-FjdS?9W;c&Y z$qA=U^c#E|Xmwbf6CH3ioefcCT(BK#C?b*J;Y>SbD|NRVe&z`~^M}ppZ!^0wq%j&x z^>{cThD{R(Nc0*rDCI#(r1_4f4$#}7T#MKRT4#U1fUeKO8aRi1~g4!Tm9hCdlwPML|VUP+JhRZuYurgdrm|LH6$OJMDjG0WdJD@abDG6&=&Fhyk`q>iwnR=@z!pkS?ltHqOP*@Tv*$0@7|cu7t6? z&jt=Z%lRjEE$Hz_^N$lV++Op$tkPj0FpvO4;@&{QZS^1}$=W&|zE|Cu@J8ty)PNck z4u#O~I4_B%^_fY1WEzm$dIdwjS>6sb%fEqz6 zcbyUI3t+E_T`EIhEb1yjc|n5%+jRhOi0Rs08}pCvh$g;=+o>1 zJ|FBFxY5!|;;gOogC`qT2RKc$Ht*{&qr>2H9#&+>Nh8VBcZkm#{GqCS^_C4r=}$ABhnXaLy@stA=&`}u7AoMnJI$v3!p%(MWlLF zKhRVD%*Htx0$I8RMJ-cFfA`5wAIhYDY4JrKjTXNvu7=*$=`Ok=#nzEfE-I+Vi# zn6zhAp}K?N=d@8e>$^(E=}z3H5{yx8;P5l;M=^?8o$F@)<{4Xcxvuldg9}_5L`K+>FGE${hT&*_pR>z*Gqjz(rA^sqijY z?sgNhuXP%@os)nEfGz=kDSShD&!e3OY>5L*yJ$+_JHyS#os9n|%{p{ZZK_EZe&&DU zq-=iBI-eH%Nu<6w!|?+XIQ(qegoEJoA9o5FJ?Q;E=qmclIAhWk=JK2z0#$MyAc4EEnG z%*DjWI*uYVcykO6Af;c*G1w_Ryf1)fi!xu^F*tzj?5jpPT*BW27**Rb*g!7>&KTH8 z({v7Rt}Uln`1IfaGKt5-eORtW0WG{@7)VU~QdV+cmW5eRXVg3JO}aBkWZ2`L9mFn@sv{ zIsE)5ryw*!CwgZCho8TpG3XnIpb;dYsR0cfet!4v-6ZY&@c2bh*F&biXf^wxLZ%KLe< z;UfFgiUJDerezrt6kltI8fN0I_`^JSy=&y~P}6cuO6=7>1l5Kcs|TH&hLIN#SP{e2 z4scYKn1AfdjJeDM14`f}12XzEzg3Sx5`$J3h@rY#q!;I`LS72TxYKobEOt@&m9ZIM za_$PuX_v#rQrCMw1e*~sGG&7bpSklc22j7Fj_fnV2O8%jRRM~iEJ4x0I6nvxT2%w4 zfosDDkJdNf;`)Hg;G=pqaI((Q85aK(Lgh8lk8WLnq2*wrHHjVoH>JOWcl%QupR?hE z@t6)0>ngr@Okt0jnYgoJ2{Sd%uEmmr(ML#H^25LLUG&8RBJWVi1ZkYJ;lmzSLPk(Q zEQlfSLugFjb7~NmPvfVM$P&_5+Xy>{M+if+Df-?`b_TZ=>OOW+guT)1-L}^5%%=dC zPfbhFAL&e!N2=Q%Xy5bs5}ZxEexOlWc_oJoII*ldm_#?j{R(4D@W~xUq085CSOLzE 
z5Q4My7dp+0T^;~Q0mmy_(LcMmL0=`gUVV2`UqpLIMDj$OUbsd& zV>rcxbid;~xP&cdJimo@rti5*a>Y6KCg`2DcVxw+UD0-LnQLT~w4ATOV2W>+R2ro3TVkx)d5xG}X1pBKb=f+v7(n z{scxias0QRo}5H!fVHPna$2M>sz3Y&S6@3do~#Y94A(+K&a3~*LHzze2jB?k{{Y-E!BSvp)O>;C2sMRQ>~S*M6s@2KbX2KL)jzu{f`O0RI@&c3+(;CE#1j8Pt!T ziF2g{d;=cS=MB8$fKK{$@f+}u5!`1clOB~4U}v=|zzTh&npG5_|Hs&wCN_@iK=?}S zB;I~{%l(fuUlj@zB`eAFdy`Jv6a|7Hh#h^n3vwvtUMI5Mrjd_}t>DQYBrYT45OiKqZ>-^XQl`H<3l=VK}*& z8Nck?Y{Z(430%G%0j8?c_iQRQk=c3zSZO;o(*%Gh2XdzkpNh3KNZ2L~(%4$XP!W2K zSpx}AEM2|71Qv%%@aheHINjD+tJmw~cOGw`_-((zvAcp3`dIoDt_fXbYO3O~X z3=AF%!pAzJtlyBpAe^BBL$-$AlnG_MhUf#bVX!F_&Yu^w-ITb}@nA5xUpK!&C>vNR z9T1ZmgZp*8ps|K*dy>&`=P>wfQ|2|O21wZlP_hJb2!Ff*AJl3MC7xr*V=y6tyC~z7 z8%r_tBZe|qo3dI^=$J4qABIFu;Jab)SkMpK-e-_gDKLKDl)ltekKp~JY~GM|^a$n* z9-D$fJGOl~iz#}7MUKG(W!tkThNH9CJ1BadHwC8=Dk_*ahKhG2&<)EOgJ(`e^tOso zniDne=P;3nfqk-g17E4C-4syP zSW4Cg%?=pcuk)tNZ^#;2I{Xwpi;(68t=7Oj8)FE@90vF6R&T(J(Q8QL1a?0L|FtQJ zM8#RbU~kF}%De{N5K9_9T+RH)rXr|t(->}(~c)^G} zW9HpW0cDM)xc@KCK^U*oS)AXHO-WBx+-S`F>!xr%<6k2-P79na^+CrnDQ6AQ)G1oc z;J-E{olqz*$zX5F9?G-^-Vlo=g(fv~qwI&1k`}>Ks1wZdrl_RkW(|qSxHt#Fn}T0@ zykWB`>0OAemzj4r1(fw0qV-bAiovT*nct8#B#Z9i9E4vt1smU>I?;2#ZuJI?^1X%> z0wu_d!GCQ^GJr15LGY&Rpv+j}4GHhgYPj^aUGRT)?@Cs#^0md4#?a48ujvj;m+LU;~@Y*wYyC7~^ z)qh=X*{}lYE3v?>yie+|Fx1y{j2sv07%+3o&ojK!LFkL#IS4E795+H4OEs4vZ<=y^{AdLq9xM&g>pfT_XJfJBlw zwpQ>8txZVIL0IkJMmz=><|^IFISAVw+`b`Z*UL%#c}tR_LR^7E0B;%TBzo4d((ATw z03&3Y6L3)$Ya*{0T-wB<(hZ$3qH)}*my_tZ38ra8b(m+GPMW{Uxi3^VXk>$t1bE;$ zW$+%n#z@}KclSy3{FAV)3_a~)-&R!T19jlz7mUzJGn-<4Fh>YO=}>ZKj3n6*j~O#J z!K`RxmewONo-99aIKBDEgpmZf;$&y$pWrn{ctb*~<&s_);~tG*sikaSEf8_AVEzpw zZMsr{J{nu7fj}{pJ&d$?dA2^-1f%S#Y z$U0$TQfBrrlB{-0tuy#V=c<{hY^g9IIv2_x7h?n?Lz&rPB%yn5aZ}gCPZ*K@Qt=O$ z^Ge?M0VB<1oW1)SygHXDp--)IEl_N0QkS+ENzGcEx(qf+qQ(C7hPE|3d$+e3j8KRD zokkMHOFlR=&qrwQ)FdSXUmSy}MYUooeR~Z)k^^g7>{h+o?%*|20bT5tt8IS63M1un zz#S(rc9gIAT(-J|3bex*XI+I|O%t*U^5x%*vCrVe^t{8!y{mac!tO6hPW_QFHkR78 z0NMS1tk_kQnc^G7U4xCCEuMEZ5m}67@ZJ`yB5z+6sndELw5y3vyU-!wZ^k%g7CnQW>u(rIS`@A-1`{o9BhwqaQJs3bsZyWo 
z_y;y1w5G`)(VYzbi!sK(ilddmhe=c0!$|ufC=mh7lf_{V=PkHiMbF|oV1(v{?`%Lc z($*g8u!qU~gb`}vCJBKmy@I)A{vR-su=~r^{8<;zFPD8fOyi~00>v14UBBTCBjrGb zoEzp}Fw!OqBvdvfcOMv`CB-{N(m9qUO$?ro(B7#@w>pAM8BD*h0q@@BnUw8|N}Rlz zXSa+op{legV(^gXrZ>zOX(gR-V!)V=kYL{8yxn_luQn4s7{S%C#z>Q>+p9@_{tY92 zQruomJ4jObj~HpOhv>CX-NaP3H0{Q1nBXXbzwF@l4ULh$2gyeGU27B0o7bUVfai^E z;7**(jFD#Mw^@Gv2_p$SA!BMQf5@LxNax)5X29G%*ku|n=u}vh$$6-oXQ1dy~VR8 z*SP{D#z-sa*i-&KR!IZ`;|y+s z8E=^M7OkPphUBjR^B>2mh}v@%#`yi_=pg^351xI_C{Z zuAw7KCAo)F0Wl6ZLX2L$sEC~M}E#EJ{;+p{T?Ub6M7wx{DL4UMH{=EMeVHbow_%^Jo6MbBFi!f%*F`_@xjT<))v(h))~ zF6T{|d_Ik`enZCCjt z2fr2Fvv_q~ZN%>{o3ce&6IT0<$x~dPl=h*__hQ@mNoLJS*>UytrlcS1h1Am@HU(E- zpOkiTkwm=Nl*u#JYe?O9(s3Ewuj>Uh%1pXoDcOjG?J~Gux1!_e4X2_nOJbb z*d;V)Q+E77W?%TSS}i5y?(a=$-|_cWekQ4Lpsd%B#A8B@nYmxrXK_ndEkAz``FUE9 zP)4V;@rGoaUA|}eS#KaLaGdtulq98->c#-kCcCf{6=-uCGYUlm17rDXal_ePR-_*`3Znt!5>BJ`fu>(JBe2F z15_1g3?&|kqPWaFmO~}#)dN%p(gTOGUQ*|P!J@k50rHLKHzUjFqNx5S!ng~lR!NSZj z!ZY*Rj5H4Gbx600%zt(KbDa#Rb((S3m=oiAP5g6Uqzm^vV@*P(l;GL%&-0A4wl;{r z-0oKrtaPlFtsS_gf-+{vC2rr4$UO=?0|vQAlp&nxdHaSOSjRsbMM~P39w4J=ydhCTG)!RTu^=kytRBdT zPp87G)xH|Lr1FUD|jl*EgYx5muG zjmb=Qrd=h~Kg&AQ%%B(U40Nk+`X!`~QBHkfgfQx+tp2`A=Fxf(?IR%`ji4m2a zfeiNQ-ymIpOY$t9C0G^HUEcsD_&N z%slxIFuS}xL0Z9JQv11oVjFO?L9 z>yZxPd|6#@{1-69pXpXvx7FSWf8Y|Y+$t_c!939}aF65En2zE1>iFl^V||kMu*0U6` ztK*;hb;}z_O`YCw3zMn%=ZT>#4^Xry4T4L^D|(VeQv`oS=B$3M>t#4NGmNZO=k1&)8-E^zz6p%+;4^RfZG!1;lQ@mI$` z8@_4BwhzQnP*Nf2k%qw< zzB>MSJ($L&HJI9<8JvsW@z3kQUp*KJF@q~K@#^^J^Jn@rc@Yjh5vWn_K zWM=Szaz1CW?f6OlkcKL+ncJKvc`Nl8GP4^NqFa&ws>En@PL%Idze{J<-}j`k+?Wx* zB~+j@@wYBH%4Aav5E+Cb3QxE-<>Q!HlDqQPn|0q0H#7`=o%+!9Pf1oIIlbg<+6M+F zQS%3CIrD*j67X4BXrJMox0{kY!P-f!fh{U<5AiS67n0zB!bN*L@gd5B+|)nj${W=L zCFVvXVbT|Sh7XFS`Af|~ExP`qvNQLR0k%?pA)1^vF*tLGu$EWf8_% zROFA2{z=IhzNYmGKdJS{PwI3==pqlC!F)ue8GhdVlb*=@uCmo(E+%zB&mC|H1V2FwooWgp!{--Ta%`qIOLYPH;s;`}uHIDdOWjDwHrFd2L zTAOBMcqUb)f+WnLDb9xnq=&iI{n#`z(2En@?vhRYECg!CJq_y56VOIJJ%brEwcL`x 
z%M$8gci1xSAksUQH61FNV^JroOcIqn+uz&@WbOj?9FS}CfUCqB>23%e$yQQRM0e^j;~fUo?ni-LI-f_n>qg)#FH)h(v|-#9(?*z%(CJZ}DQ`ih7X$*XL&k zaKMMYFlsK$lxmbk_zy~RLk4B>rL+ra>T8sJf?SRdl@t065%wuq$zVy;s+k8)$L#t{ zdYGEUC2N-W7$v~FeH%qMM44aOifnEkw+DmMM6`)$N&CZcR*)v)lY$N8xUV6Fwk9YA zgv;S5nn{({fau}-g;TIMbD9Tn*)lR~RwpHcF)h>lx!J4hfoZmR&D&-6!J*laRyD18 z@cLz$KED*krRB3Yk#;BUaTN*Dem_=Fjb*vcGetvlmhLJFyi8j&ZLgOk_uS|UwIGPd z%3TDd;J{7xF2Gn{RKi)Et$I9pB&cT$e&5FR)vfgsSc zo@+d=c3*d20^-l=+P=T679y~4v8-Yboa=q^i_HtC6f`V82i;SpcA=;G{dQI@l1SvY zn<}_c3yQe)WnMi(U9~~nuF=E|gjiS0auq~CcaOe8$6`Fx715->RE~$@V%T>1MK!SWJ168s zh2r|0-HPt`rE)wlEiM*Thv}T2OPq^7LDdi}#T^lq7%Wdq54dxDpZES(VentMCXiDY zEDYj67(2so_wK9Xr@nS(xK}#CiHbZNk)St{;p%snD+MJqbA}5S$QB41V+CsS z5q}YF+Zfz4jEA_N^ic4D{2nkYo^SB+nn2J5sY1v7(Foj3=nFoyw2v><36qdHz>r`iG0db)WqjR#pBNX= zDpmQN-i#=&QLT;=tT63i$wot!%msaLQ(07}b+uR{@c&|s%GotSRWGr?pkG4vo=cDMkW+73jWahUA<7&GeMb9(E)q|=xWntS79-?TX zOo`D^W4DQE!gW2!X^45_D0-e}Sv?r6H`O7wG|aW~y~QRY@OrW-W#SWiFekQDov$`9 zPYkMIj_iN6y7?o_SVVOQQYrn)&ixH@0&uQYH$MmSk_01q8%hn;{mACe4xU8MD|C(P zupgvdNhz`@S_}@%o@Mo*Y(xxb)0fpvvBhr$Z&Tb^*?H!krPcvfiZ>7$&&(5yL-Fck zAX?A$O3@hxW8+v6KJASe?{*mbVAtO>HDr1<^3Zemy;NH*hCHAH%a@ z&g)URVH4i)d8z3=e?5zyy{rK-ogh~b6Db=ea~3^Q8*TIAUMh)ZJQyR#^(_88BTQun zx0sGxevRU5rZ_I0Ug|jyHa4vIE~@x=y)iAC`zFdSA+8kk1B}<5=5R%G4uUM2D|-&Z zX}VpE(TXn>CHYA6Py`&48`)-5OyiTIe{wTtu=q2BxX(O@ezFXr$2&i7e2z(g_KNXD zS@Goy?nr*Vpg$6K{gW!!DiPq{d$8#F1KSc`FgK0Dt#k8l?r`)$@oI`;2Mp0OAIpNw z$85E9TVw-Sj3ngIsi$eN$l^3N7d&Z1`tP&!V2Ex}pPJEiO-9ccx}j|7yl3I@vxLzh1h z5F;cR>fxsh+i$XcKp7d1!9#q+#2g&@nCvQw{Q`!x3~%Y(}S4k6Jl!@81}H~8k} z!Ba@|QuNHh8(auvp~Z_xiI}rwV=FizB%BB~1ySAEXp-htR1LhLoxDl&MIuyJFo}mz*I}OzV%>u<4|q|~{}=)|I8jbbqWw_!_f=vU86EOkxRX+~mEiS_4h(}T52tl@)Vd}_odv=NxHd>n3f zHHj)sJ-Y%%io%x%%2Y{pRTLfHU-du;ALBWiuCzZ^F)qp%`k$)5_h+AV{s_Hcu%-qf z7zF8ez##2{FZ9?a6D4pXghaHA<(V?;l;W24iTVstne#6DYjG3fpBL+T$O%hv|D-Tr zPp4Yp7CI3Y$7Gm8+hTgd@+d?1=50L=Bnwb~06*@s z2ldi3pB;k?YC+)F3`AuK{5KCLEfC!937upkg}1=y#SJ0wv-R-PRdwBjqQT4aMxg z)Ir2d`L1|kn)qRC4K32=6w<`lyVxOWkzO2P1P-caVw}j>V=z`?NR)oKFy8u2Ll*_n 
zgsXb^cvY13Kj?c37BHNso`v_x%LQylUt0>R%vG)dJO` z1q|cYqN$$Ppi$#j)^R*?v;-MI!#iPi>D9~O;p4p(ks>`7VGR?6-L*PBof-i2;}Bs+ zGhm}RYs4Mqxmpi$MT8|RuORYs{<;)p3@I2TkGcuY9Y}an4dFWT3zlDlCS@Z*Rme?r zJ27~fx21^1#-QP;@ZkJ_wEslyy@^i|0Mwf@D7G#)?&P2?|x#^8`#)=#__C`63B ztKGdgj6J=?`q65s2m25o5auU#BgP{&sZe(p@6>g!rc+40`df1GP@J{E)_m{coqAT7 zUTin9lHlVJB0IR|E_!YpH)Yg=i^U;*pssh(v-hiJS>boQv0-*i?5~ZjMJ^_TRi>1V zwt)UPDYoQ@lsL6xU_@)aOCk=^iNO$s)j&N0|NDgh$#_SLr`yvr|GQmjAco38L(%2y zyS-%{jv;c`p6CsSI^zhcjnHONrG7zJ>-=@1W%ooFi`4p3$=x-1bEwuQ`m3c@S=Qe+ zg5g636UDVHOZP95_HRcNE|(MilJCCh^|fG ze^cu|fwrAfsvm!}KWjQ`fZSioqb~m%3wPhf2A`>OZ<%}ShY*pp|f<|x={+~jn0Pss&9@o^TE ztqy#GbGSGdJdWQm)~*ix`H+kF#9?@&v+aRzpCqMamIGgdhC+!^tx(ungABIfYdPRU zm-E0rs`jFAv7Th~(8b!G5_Gx+J&vR>n;5S{M!??CQk8TL^)2OfTq27FY$lMT4^khV z!6@Qu8&5qrPFK5vY@wKEq07|UgK74FEWpgXeq5vL2g`t#7%`gQF_=}ekOB9&VEe(O zAlwLdVM~pHjWrFpg*J&GNfe-g7lZNhw51ZJzebx&=_WYf3q4A^BynOW3H|zdBbW-Y zz|YwK*)&!U4kqFUqu+hWZMl%)#u!%*(ze7Wiyj0FUf5^}F!lPy7#D_rL%AJfD7eS2 z&2T#yT;6W!&=>~ac1>tY`62;5TIj#GXTQUv>T&N7L z#jMdTqffuF7kHG2qC3X>wM!_l0udn=nWIINCY14dA*{n9h}dtvg|`6K z#IzgHGXQ$j7wZ6?MU$nmQHk)V!y2qa2qmv$k=0|6A(yIZ`Ov6{mg%k9Gl;<)T?iK| z69xXztEqVYd44#qhR%}c#yR42z~OA+Jtp#r=PJEo`$#GX)8$h$*Ykel=3~iS0~&2f zf}_V&4Dgpupx#|j}2b$ zX^L%828Y4?zd~DLs*n|;_mnV8D6B}Hrk`x!+Xjz4CCHH&`dEY93|1SAia2kuaY8nc z_cS)z8@*e+K*fsw@Cdyd8w}C2cWt%7g+?i)`nGR$;=rJdw+&wJX_B3=*l1?r3Phm2 z!CUwv3=(n0VA(By1EwQw)F_KW>1)Vt#lMZM~L9I?l$OOXd?SIg9N3gU?-bm&C}=~zJkwcwb-G68hZ zM3X`K9l*dD%bsD%g2py9(PWUSP&b19_=9!lh2NNH))h?@84wto6D%*%LK8$;h$gm= zq+cz0a}3r+pbI24@%BiEbL*JFRP9pW5gwaqBJ8#!5S4()-A8A;J(>s^B*_1UpRd^} zf?v^u-6iG|fj@O)9CxjmWjQZkpot1G90ZEnGuthtYq*Du5XoYVil{)LDv zzsK?CoPMBgmNya60*s?LNcnY6KLj}_YD+{ZtcIfzdSuRxVoz_P6aSr|JaoEFN~e*# z_z8Xaln-KOHpy>jW*SCYY;1iGTWz!q>MO;K?T)G1Vi3n%vPry2%dI4=e2t z`Cxonh{VpNPG3g`7e>oNc~tB+4`MS|9!!`R{%hNZ#w?D4W3cq?eoN=ZlYTG{){EI7 zkmn^ISvCX}JH++rhQTv!kl$h2I4-o2G8tDovx_!Rkd$L5TLb}_U|6)F0CE)b*CBSO z>r?d17f9#N=e!xB7IngeL})Ju)e($OHZb}kkr}#bI~e&U$0mwwpNRXH`f}kmL5v!r 
zf?e9`b1uhb*#a6ig6W6c{$$?deBWI6_pf>T&UD>hUW>&PC)jwcjJJFeCAC#Z*Zqx% zcpyGn_r*IVtIdOD zOz6PrP;0*9<(hGc5lKTQwHDY2ZO3q`gK)4utRo^^0y+DPVI21LK<&CvejjmQ_*!EX zAVyx^QLhr85xJ-0vUgjV+CQp9R;dB#c#i>R4dGL4#rm{k)gSJ`Y$0$AfxWLEY;o;j z7NOh>C=SU+#X^Py?tYDEJE{xKJ`l95sMJ^n1#UPd5AxaDUp*>*FOCD z3Wd|zT;71Y!dL<6EUo4vK`XaYZGa!p)k%Jd3rvE6#l+Kgi<340*#_sq+H}MvO1u(% z;ovW~s0XKqc59e3v;|K>j=m&=YM@OW>EL-g$&_5)k{ZZQvj^-2oG*^{@%HUHDZGGa z3|0~eeEAxYgNKfXE-~Orj!F!|m`TZGB?-+ov3gI3GP#nY&XcGef*fY)ii;!F3ymmk z0Is8wW1~t&tyU7+{hz|hS66Zb<*ifeTC*8+Hgeq(%H&Fp1zFFtJnEYa79TV4)s-CO z5TjT02G7gGUA(}R90PP^tR>Ff8<_;IT(iv6-2*As*7$h^^H?33sfbVV)MS_ z5l=WNpJz~?YoAZL;DReTa>g8$!5fIBE->c3OsdM&l^mCi#e{%2i@9MbV}iRwWN`Pm zlH<{wxAhxRfPw6fV;QQw9TgIy$fvIjbDB?FB)QvBAt4Lp@1MB1*eJEE!IFg9N_u^N zG>-n;Q9%*h4hr3gO54^;G~0Y9CfDRldFavmJ0eJByeoFgb%89a%(>nRu}QJ9dwiG9=+&(RvcNEZlf3=u z^ot$}r;oVljNUoMRfoYR8}UUqqPTX9WZX1N82CEddd1WgSkyC1b9Blm1|5tMPo^a> z`T9_Gnk(y}zyUHznJcc;+hJmQWFZ%#n>`6rc*?$ILmS~k7wJW+#I{gSqN;b!4!N(L(+TBZ;>DR=rCw(w7h+6e3ZSOoc~EXy5~K ztS$25`^Tcpg`ew9`yXi-rGnL-qVlQPIR8@d`u1pD(Vs+fDlmgB0#$Xv?t;*cmM7~N zFnl%&yI}PVkz-SYuVpqEJ+JixP#A_;ENusZh6)=u(=*n$eE{m6y@NN=^Una(J6l`6 ziJpbc2>|R~FNm8jkjYg*$yOAsD{O~3j&O{QAR%?wOeDC!HbKWp#L>XW=nOU<(Et&xq6$#oPLAX9{(rE>i2Dm1{ z`$+~@=#XIuV11+MUuRauNs!Jc`wx=#vR)J8OOh=iS|Uk%9ngpIeGSW1bX#2+yD6St1D?&peP^$-m4V9U|i=)~~ zYs483we&EeCwzmk7#EZiuzi5t^&6s%W9(({Kpg+vK7e`iBe683c`L*JXPe0!ma-`{3);*`mJOu?pvSu339y|P7~)6zThYpAUD;GbWpL^{u81Fn zZamQJ3)@c-KXEXtMXlBm#z>{c^?{{0%$PBau}%p)0ACEA(bS6?8^tz1xy<611FEkOU&kxbFVC3Fnx zRclBZ$P;u)WbGFgUi=Qk&3;D$p`IV+&zO-TSJtA2X3Jqjh)k9!nZ(RI7A1(CDAq{R zZW57FrUrmua3igp?|e}654p9E6+#nChWnMx{-Ul%coI1`vf|Wvzcv^B6g}5vtO6Tg zX8jP+KE$jZ&ppzEca9NyrR3!tW1QCSa%Z$ZNDx7<=~?fE5oo?p3si{s5M3HoGnG_xFCRiJ&f zuyUue{2bUX(X#`MYGI_Fj)i5TKJG~$gI6%Pjuk;Jgi1LHfWnE0SSU^aB{z6!&@@tJ z@F#BgPxy}`CIE!%f(lQ|@JDI=PnT&c1pzz<1PF6AIGL%kAcJR%l3=(8V_KVMPPD|+ zW!ic$UNY|*jB!0U9LZ(cda&z>3TDchu4ax><1%eM*!4s;n0lhC!NJB+^xT6R#}MHP zd8wg<=VAGpC&PgITR4 zqn{q3s*o>C;uskWGI(!a$UIh7xC&vxKx?$&2tCNK@~J~eDJ_O~3~7c_(U6-ert8;1 
zaZnJWti@z!&Q%1QNTwy%KIlEF@~TtTf3$H|DXlE0|~7i=gFii(>yR&x4~`WeE~^vOvX ztNhEJSs2QP;-J39f|OGJq|UWKQQ!bcj^S~z6VoydHLh(un|qs*UUhg?8N7HAy(tR) z8%{WxUSpSs_cNH>pH*V4OwaQnh((e5S`2m$K6isoghMAjkEVX(8lt;`Ku$@OP(?IxqTveLqLEgU$m0NJ5eyEJ%rhQ!h7XUn5#+8C z!(zbNYU6@1f$c+St@6Bc{c;RKpz}jmDKK>Nmn)lK1i4=N6W1Yy zf(siE3JWAk1h+U@WnHe6=keJ=CpdmigfnC?Wvd)3xy)7py?TAmpWNE#{p7(){CIzT zzp-`KyWCsr*LTItvktCI*Lb)C#)ZK`+g-I-ff*0=8|o4T&CWB}3CF8_TW?ZpK3um9 zhHH!fnC(L_;si!LskSUUH*^BXsqflH!|P8PX3_JVTl5R_Fm?Y_AA}nM&luqd?5qC9 z{G0`fo`rm7`sk3jWzMQsTZfhSKhZNM!?cT%$o+zjjDg5=LFY29QxTIHxvX^Q@LH;f zeBD*9m%o8Jk%z>?G8m3#d-%HktlSP1d_s^(zRWj0SU5LT1LKK%h(r_KqWb7gyJ0xJ zgif4+nm1Ry#SV;?EOApu*Qu>-L6*VLZjq1V--m;}FJtZyfg$tXP z*^9EiD@2IR%aQ~BdIDHD`OkbT`W-MacvSyb@i9&|FtTQnK)k6GVB?R7gJXe~uT6d@ z6Fb)NT{sn;-~?BG)4~=aaj-7T5D~@X@egDj$n-iA!&wYM!p<6(b6&cgW$^G6S9^U@ zOa0(Ty5RP#72_x3ut^M(gngd`b ze3hA?&d-bBqz1rOv9dyyVL|QYML7D-HHyy%4v5CW777$3m6DW(q=RUkh=$jv_jDsE z`H@LD8mQAgAzbnNO1P51I)tMZS4cV2rh>K-edOt5$B}jv(42%6-v*q@Q5cnT*)Z2u z`!ZsS#ExYN;{G8bBzWc>tWR=>7^J_p+@bv(MxIo>gU2u!hb5OA4-nD##iqPaUii&8 zI$yE1Xl4K5hsAFn2Xhj-}&@IT>XMtF%+91XJX>u6y6s?+bP!&P66{cH4Llai>y6Ccej;I^Jl~hid zw4+4%Sk=ToKg_k@)bt3d$Z76|?x-93Nh75DG zQPtkq(OPGKQ$!MaxqY2O?G#9^SZVG-#3+KIpAL%F?H+lBmaokGL3Hl?fJ#XtIqKR` zV?3^@Vn-Agsav&XLrlMLIl(}<-YAex#KGh1)MVrKb*Ma-_1pr6q_{+ACxQ3n_H~!z zix^}$lP*Y!2NDicPDOVjH)b4q^p6o3P9Wd{WcU_dFE|R3+t#jtx0?pF{htyoTkJL>r+?2s20f z-7@GpC8~a(z=CRDx8}I2yhZqdsl@~z`_qf+UR+h;g~x@wU+^RStClAzI;ayp7f0Z0 zt$vgXP~1>4J0Btyrc_*_I3L|$q=9ipFrC%&G)r=5NXLMDmAWJEzk*0!cLE%+7pM^9 zQnBj+XF)ecsxdUAIx%=X4-Zj-713tE<)ADqwXkt!7H_%{MX`E$*!Z^^O8i4F)NWum zfNpf+;jcKF;Mw~0ag2TR+pL%&P?&_xUda0KR6zSSE6!Y_$d(1;>G6b6(L4b#9Kf;% zN*Da89r-pZ7F@r9awtCV-K@BmHOd6ck0(cltpJQ(R@w<;JwW?pjWEaA>@Nj`uoDHV zBN;ddi%?XT#bwpO+!|3~H$2FZ0r+wMF*}Ne4jTVKVsd7+sI3C3o=peI_kP{}7aJ;+ z21W2vZ>O|r3;+PY4eA?&s9OIX;t)9|bC;kWfx-31L72}9Zxda>KhU<^6pn?7onRqh zwmwoGrY0Lok}B%_z!~`G!eXnXFiVPHjD->PUCFo#-uiIiy!d#&Ke(safVMtq8Sjwa zOABV<4AvGvR|;Ze8SjwCzAQfyGgsIzo9t~kunkCQi_bEW3#Wp??3~z19T%}ihtN@5 
zU(g>(1dn3(>5?FF45r>#m(mXO4%8bnqg`o%iYnEf>GzS$V_JoMxD^I&Py(&ERi-1D zOPpXgs7188zlxKMuxkk_5(G6Jlz#>Qs$CG%B}?|;d?Bn7kLE#&{0zhi zL_!v1FbQzxAX;sX_6n@1^hlbTdANsHL_WYu1t9cx2G1~l5-dyI&_bAK7O-+R!OIwI zOe*fRv6@VY1}oyd!ZiwY+hblO@UcEUp9st*bbe!S7YIk=;MQ0=y@;L_)1C}t(l#|S zm8ianR#cTzRs4j}^OMISZzV$RNo8WOAk87JkEsKWpuf?xmHx*q%>FbO62kfDLX9Q# zBL7aE6WvOSLY?#|QvVn%xfq!O^5!?vGx&`d4PEct^@yvEHKZ$_k%VxFdV@iLK{ovm zut!Zt9K*apSu~|M(4&xFbrxuM-(A-lED`9|DCw*6ou1lmgXB@ z51N8KE-~b?^L(kiA(g=WRJR`&{#44!0Rls3;^f66z*vD{U0x5I;fc&Y&T_@)#;h%Y zQOCZ%Mioo&?gGx^c>Z#pg8ErP1|X|o`w5)3bz-<@Y+(BdoRr;sQi<5uP|G@r63H`M zi!4r(!pKR)E$jM-=q2L}Cz2@6BYwsc{u}<|y@c|y&HYIP(T6xQ<&X8H%@;QG3`Q6FXTHNM6r3Pxi{kL#)XL)x zR0e_Y;f^J9J%kuEx37l|fo9g5@Bv_GC)W@Cm<>|^zW${6?vww0leG8wMR_0o z!?wl$Wc(_5!%bm4pTT)BNRYBrT@)gG03%BxFzW`E&PS&OYESmpT-Zn8K8e@if2ix^ zA$<@FtR(zk^-_?}KSQw(7c|MTvK)i)__{B{Zw)i1N5kA-@meH8?qK4a3C<;$8*f#E zfrzvkygUTIcz_-HJ12aV@JPY@ID=z$O>-Un8>l}*FTy%O0L4Z~yyc%NS%C<|(m1(f zC(j$sV458Ubvg>r24O5R91#&?n)@WOPxM#h^eo#hbfR!lAuJ5FqPV>W6HfAQL6c?m;;zc@0@q->AB=5^7&GSD zM;kP)Mld+nqWe=ah{5`>XW2Yxx*0Qzo|}03@9l$8Jswg?i7U{qsfk(-hwGR)Yr(0d z@o+(ton4a<0}pmm7;8QupnO#g#~`I$2a2AG)-0fsUK3Lr3}6(h?jq_SuDh65MiYG6d{1>(_B zd_c5MYdvn*9a}p!nwXidS&}%0cm^j|?|{O3?nproYe=MzNNM0PSPO*;c!WBYVqHbw zeFm)|^&D`yNm89 z=urZ44-#L}bx$=4ra(#31H7?G)b#wKM`l>cG)PmJ*Bt}1`C?v&&|v_u6;ZayKo|_7 z&+3}63RV|_xU}eZ*+qXMA};k6C510}HT7tDB`mQq(2EBCt{smYsG;IVEe8~4LikXj z>ZC?E(d7i)id_5!SOsn{04B_>Mbppn**~OdSw{N8UoV7VXyl0T6ZrV4<3)qbEKvMe z)ANNlM%_mEUFXD&Jo$M|mfl03@`sOnyZG;#nX4zCFQ^lL&W8aaZ#eiJXZkYHMI&H{ zKTCoEADm}6(~|>xVcX$(LaV4cRg@mC=igV@a%+K3C{y?lNu`?XL*{?Y3g;BZ1E2^J z-Y83u80LGsC2w$o-CdelU@-jKM5W<%!No$QX6ssBeOTg%Am#Xe1G|nbL=~F?x{bd2 z^nAyogg<6xi*;52NTSlnj)zGJv&Z6qG(qcWPnAU%k(D*R5!sD8>euLfgyj%o*` zOp{8afOA5hguqF3Xso9!22OhhgD4n`kS5r3C=bwU@g8hM6^nAi@z&WSb3_m^e>{(TD_wji- zJi+?q*bkW_4F<`;AKpUGPde#Zf)|%W+`tUp!YIYe1O^y!$aKm-veCgoWu9A9qK`trGGEB z+d8bS(E7qURsUXt`~W=fE2mrbs%0j=(9<6Di7hk}wFaaXO|WdFQ3e?p!}|25U?c!M z{OsjBL)T}pKk`x(2JxVbt$&IHuoz?Yiw^5Fcy33oG|*RkfyeugKZU-FHdG_z?T?2D 
zP5E=kb*!!^`Qsr%&gu_pEx8MsS@g^YUW!f<%wc&xkF~I^fVe;ZIN=H3nyN+QPh5a} z1h{n4Q5Fdj3RYOc0`NLv>_zy)@b#e(?C>32bQq72?|1-9JyuR`JyMxffPgRqZ5lSt zNN-2QtFx`x#Xjyf8WFC-?}Pvm3Z=?d_>^10zAPo5PZ(4Z;%j+0g0jwh0T-1>S5 z!LJ~cl+ZvSi*bIE@A*K7VCF(vAbba^wULv;CDsrIC<({`D&8*4a}34|^q3%A0|irW zLE)tM-T@&Mc*jmoK&3Vu6+bkyw)F?J38=o#PtL!T58pjl? zGKsvkpWk#<0(B9{@Fe_=x;P&mC(-jB>i*rS8HnK(0|nPxXhZZ&-xt?Ml3$qaG&F%H zul_?9pp3Z}>KA6P=vieQ<^?UpNgYGhkp5#74qz|~;~X!ZmwA>owq?4Dp{$&)WrU){r8f4|=H8NeKg>3cWXlKu4{C=nO2$eRG`u z7d_`5C8Ou>O-a=k%*qU2F9`eIW({p0p;g?kn?e{Aye91H?EDMzw0l8k^$^>f$=0sV zrsv{nKW<9eo)QQM80Kpem2KAWbboqc`DxFjGq|C$&8Ebh{Wy!BCo01n;Y}ev$eTi& z4RYNh37tjH+XcN5R&R>wdBq09o2NA-z#VT7Gp`qPw%cPqDQ(`Ra?G!rLgYO>B_w(g zJ#$j<{MUjwUT>R{(51&k^!&D<%^F&&K&8k@fhX;s6vZdi>8AAf-j!15SDV6d z_om>r@EWKHM1`R!pA;+pdsE`beo$0)|D=eo_8J@p3L#xY&+AROqTy@^E0PeU8GhXq zj5e&HVHQ2JDa1R}(WeZ|+or_9_8_|V?@ejFohYQgHsyv1Ae&-(-Y#f!QsU6&u4U%w zq#y)%Q*eoS4b(qnQ?lJx%Nv`cza^{|)~}}L4V9S%Pn(hqng>O{x0`aOMb&y!lC;Q` z&EQ||6eU^=Gdev8yeTvmt4%@ZdfSx50uaE*;O&C8_hJjabiB@{U`A>xTNBpfJ6ZD- zmSpgDLEAON2~RCc1}~d}%VP^TDfrT;*^Y@S)l0ocX8_40nbz>WiTg_ zZIsj^(U?Yx8|kC1pA_8DcGt~KJnWG1NNO5K_ZBc*+Ff-cBqwWg_GG-)+lki$3WTFY zcwftSRKg*!q!xG+J@3m$hanoL>OZY(7>w|KAD0fP#BPBqr%jK{zvn??APJfRO$>+Kj2D4biiyjR=-K zv4ute1p3~Ml&$u4_|LuMR-R%-6n2Uj`| zIm-j6CyFZZ1>yX6(etvg?`V^D*tTGb*Y7i#c`uWr^B^P&E_$Hq-+)ltKXE@K>PJMi zd&wJNgP#4ACT99y&*s3?zj(Q_Cs674|SV`gV*@k%Z!gMVkZM8?OtZcp`(43~&l zVXB$=L-f37EupjX0IIJx`&bA!*%7gu;Z9(Pp8w(;?R&O$uo>5$g!F~! 
zbU^qb3unET?VTdl0VsaFr;2kVhw_1j-~($T)0)}}ZNk(eEEn;s?v&LbMGzi)Nz_DZ zKo8OL8p3y=5@Pyzh@Q7brsbY0cGhMVYM!$`ILDQn^oPrnQ1!ZDNpu6|1wv9&-hoPa z*vCWkylm|LUiNiNz|A6=FAVp-x;)dbobIa(UZD+9b)XG1tAOedCCX0uhW(Xuv`3Aa zKFs`PFkJ6tD`I4a19RV`@0X`Bm9_y7+&?XZND;H z%EX0l&=X9XQk;Vli34p!01|**SJSh>DE|CA=Vb*L5O(KFd4GpxhBv~7YN^hO2tvmD}&$7 z(`(Mr#P9Hs64e^Qcc5Bg#5)eilT`G5!q~6@s(IbZ^!$Tj(4yK$w%cKLu^g%S#Spt` zIWyQ<$5N1d2I^LvgG!lWFJ&dgAro!lW%>CJB|U-qkC*6qtKixsr|#+jYHW(=`GHGs zfp7y9f>0{yt4bzc#?|nBmT7utFC#ybtN*vCc52!%e@4S!AbbZZLC+83k4t_I=ZI8P zYBY{W1J&V}ik1FTRBMr# zM;OxhLsV;CN0`h?R0G0ypn38HZn&Kw!HBN=SU@YvdRG?qS_nU2-Cm`LQZ7t8{Z{w zko!avQ{omVKc?<5-cusT`0YLdmzAt48L3k4Ntz%cV99fJW;9Xo(db_{#f$5rz$k=Y zk_1n6w>*;TMI8s!NTnb!p>Hcn-7uJ;Q!XVga`z2Yz&HVCB*?|gLe)A3D{0r~rE)c2 z^Zd;G>e4NwNod$N!57mIYF?_ci9V$}3Lzc^1|@lzSz+*0(_V(Z)ZRccEf~m2(Xk=8VNC&8Bq|7ev{CPDk`c| zN++NyChgQg3MbqIbFH~}FcBKyQW+Ag`N*1`?XX=`>;dWv#xkyWbxolQ1;9v%A0ju3 zIL2y_j_*+EAxDy;)sF!PmCpS*I&H8(=u$%n3)^F}K|2`)5g(bB!JDs5>NCtu+*m%0 zcKR0nTf?kRl#~RPBWtRv54>#I6%APvBa!yEGK9V4pofV}yTWjCg1xobh||IKD!92E z%LWMSifT7xR|*SVy^?A>KCkN90(a-Qtg7UIMGE7jHp26&IG!S9m5Oq)2m%QS)IEZB zB0%{2m=N6ORY4MzWFeYC3A=9(T=c-R=;2Ai0$4bTwb6`3=2IRJ?H_SY(?sN@CW2iZ z&8I0}W9K9DVcU+LIyX@_@d+_A^AOylfiA2=Oi#BMc_{mgq+OqZ=T%j(R9FW{Fpw|2 zOko#YS|OPlbfjP~vJSb(<}hoPs*M5Wu9ZX2A;2(ym3d~(Qo#@LVw6T`Y`6**ADwBg zI!X)O=(6NBxD*qCT!GnDu%-n8J?$^elnexgWato^!K#9_;sP21y&YZM<48=&ixOiQ ztSVS8s!L-~hG0~KP~8>&Lv_3O7bRqieb8=$!OKf$zDq>^jV7Uh6#G4`g%}KD+Ie}s z4|D%LmM1#G%`-26=#DrIK+1}Pex;eG)lYzsYDo$%C>%43p20BDK1zi_8W=^Apl9Ry z@C@gD(icWT$J;>7sG0}F5<;&fMAHz9!EEx>R~Zg+qRp!EcvOyE3CcZK2gzTETU_qs zy&g$q7Ot0-^bX!c&)zWxi*J|%8@oZM>OY8{$=AP$p23IsCrn`P7D|X7l^5ZMItv$s zT3$?o--i4-lV9Z*fZ`Z@y}A2~LKAgjIQbr9Y z)Mo0t%D=gz6z5Q|Z;nr5mn|Wo%3MQKKwj`yMXO|(`4VoYE;;xhZflVK2m6}cYtaTO#|}}78x=qp z%#iG!X!uiyNeDMkBuJtl)d_qkQ_&PRUQIgzMRKtLs@^Ez(jfKu(8r2j-Ef&AvLUS=QJ~6U0GuMpxa*11g($OO$VRc#Nha!QYHu|)Rpd#~d zm(s(C99tD(fOFJ^#vU=4Xx=`7VI8xZg@zL6+Y1yuFZ5;F zn=ygtz%~U8qEiqWkRy&X``{;C46|572jpZx=kSQrfip_9vp~qwG))p+3v{-tlVc7S 
zU;y!is)ePp3&JkXP*IBfRvjTI`pRL+7f|mB_dBVpUZ*ZeUE6Isz2siP}LliTEMphdK?rQ_9r0C|*I| z_``|qKGPAWsN1;@6xtzB^>q{_V^iWuYCxz+PgUM~B}$Dn4Q6IeZV-hj^S9Cq6;MQL z;W7vFTMik$lwrUWhIyNhsL7rpo|JD3hx_v_Q`phKwSHC#IyG0aUbc!uMh-shmWYu@ z?^l96<%qRjx(j0AVSmk+-~^D6(5GirutoMIjYf5yM&nv3QRzh`Qx$LCb*QgKFr+r z-s>38>5IGvU_O~saamGuKxP#rs~4OQ9i7NZg7oXYKpD|stfI17Qs$fcfM`pCvpHjW zhZ`9$dE0x6rEKz7rCn>5Q@~H6ULhVeUOx( z>@D_06HzV84LlD#p2Nz*V*ofvN_f!~+Pv;W8#hquC4Ay=ph=}qb?%gdqy%;YD?=Ue zKGm%F@gf+=)A&Lp4#cf!^?FjcjX@Hw*2n)`^FEIwv0^%pDrTC(Q4R2w|e8b2x*QAI2AOG3W;s_`=D0o=^l2{z==<3|6@r zvM|{l?nh*6e|_{%vNNF#)ab59)TPWvAyj-CUceY3_ zuZ(`hEvxO%J3veimZ6P$0Gs*z33)7h&Lc^r0PPIsBcyzw6fQQ8ohf3{-Exo{0AtLd z$GEusn6_TvVdIK+kFWxIIk!Ma<1en>f|lP8;=3n=IcPTvl%b6PH25+z-NBh~3}!Wh z)f_-;Q@}~|>@9FB(wjVXQ1%7-CeKg5LF3(jfbJvAe0q^=_Ic%%H{PPIHp~YnOR2jq zU~&>YBbG%*WO0+3sLI5^B_n#DM9-9ov8;%N)r@~jN^vP*A7ZfT)vk6CT+nuO;3eVV z)U04Id~t%92XWGrSw`x;wZSc5)UygCY&5mW_txogVU9f+_tD-!*NP3$1=C61bKo6d!?pxQ!%ry9QWUo z#a99`WPeo~dPrWfQ&pQH@>FiI7Np*n_9&%@wZpLSn#qKHq!O645hVjE02q?dr2($P z=g+FNMnbvDmMd6>t`o~pVTp-okCn|;(aRb-Uog5;XoxOz8aiwjP@;b`gE!n)7{21} zh3)ySiU<37R?*Ac&ba7sGcjM|0#(hp7=u;xQX6aM;B$L0g=HD+0<%gS%l27tvoVsA z>((Bhb;a|uDn!#6XK)|dIc~TQMeiEVs(Ai_!ist+pTgYRN4t9lOq{_J7c=9c<_hsk zAWkWf&ubF7o);-$+F)$I5;Fp4p$wuGi$vq71#)SzK%`_AiO%+E+>%HHPPr1yp)b$G zmOfoM&@_yRK(8&D;OdZjTFo)Bb9BE5R}N$YZQm-at`Vs>B)S3917Ki>KJ>NVF2%aU zI5$^i1VVdVYTT6r>tHtRt+yQOH=UBRKwBa>3(J=GF8)*G05r!T3mbIhK=|(7%@0)+ z{0Xmnx`?X?pLVOrE!^3<$O~=w)G+bU$siGPPZx1j&f*0JodD2cRnL>hsE7Xr&D>x_ zf!Re|#njBSNx6qi4;&6B7xQFhO8Md_G!Zz^jp+o5opfN{}a)Dq!fo)>WlLqORj;(MWs0E3;@ z$vi!;RR2&3YN`^~GxLqX`U;XkQ^52NhArMp2;s!ubvm*Y_dA1?zQq@GhvDbf9k^k2 zfCVmLTOhhvsp`{84)~-fv#XR-HXN%s=@HR=PMAhQWA zn&TYN)u*2qNU~A1owNd-w#Agc)?s6ASH5$dS+;K~{+E3Z+LN$CKQGzJAUC63+erhU zoL3O$^M6Qy63%`NmN$jC3l)f${K<~p7W*)lX~{MOq%auY0CMt{2Q0_lVHf|7P=f|% zD=9Tl(j1xyil-=3B|DoEO?=QM^oeNOFkF1~ z3y$}RP^DG}bxKv>N&Gu4-=J&_=9D7BQ6_<8S2d*Okx%5XKG=~6C>_pVduZ!mtGX5b zTq zh?(ZE6sK}~6RActxQbLLLx0n4wS_E^p?E4(VZw|_jp7RoUg&nIPi;bnZqdqUEmX+I 
zOZYq8iW@fF;>+V2!TYqO)rCC&(fKno%dWsGRaBJ2=u?28Dx>g1ZGC)fCvzu1OmsaQ z2f}K2Z>#NOFb&4y*%f9=+9RHLa9^}CtZi*d+9RI$v$=X0JX6vh@r;>e zSC}bjk9hhrvYl+Eq&?zsqV>Pq%XTtId{C17Fe092dgS2^;_3bZ)}nPGV-qp)B=GTd zQ5B|%ct(-fA|Cb=t`?(|G8EX@vAMIQUpr^6#_1?}pqCDVS1c}c(1Hq-k}?D@G~H$9 zzN@pbKN~n9M31eM7u>*$ccY$EX7^-Llng|o?@r5sR>wqgPQ#n`g70aJt zC4(>U2s(e<3|ILj`;fzgclknJ^jG;M^~$}i?cI#k4O=&ZVG;<65-OEjt1GOlWY>0e zUze-=lHS@;Nw>r+9&pD_@s9Kp#XQbn=^o3e<3i@Dns2AX^uMn1OIoCciOg3O`>2KM z>IS;t8OMe`oG!tgeQeb6%FJBzyze7?qz8+hwO=~b@B1iO7jH*gP*>4&Z^}>mI0lQJ z^?Wh=y(20MTrCizEyv2tUXSeWqsxH7?2$L&LOG%sM%YKZkVM;huJ4Nkg4)z-CS3fz ziV^1)t!{vTcacEA)lc19KdnTGe!BYSu5O1k7;05dhV%J=uf-tU4v0bB%S<97e|kR2 z1)oIM^ON$<{Nzr39lOt~qXFmrE}>_pBnH=|Pc$s^LN{`ROhONz5URqb1X$uVj&)yp zFAFCsK^-s;$it*ZaEnO;9db>Fwkf2{qF>}1-QUbdUHj?LE)=ae3KliyS&J-4qzIhL z0^yW^fo{lgk&wmTcuNs7rX$m(zU81C5~vnhV7RI1xZ~nC(7Y1ob`FRH!#tYpe#z*( z$LKWd=re)L6f-LQNg2C{QPbt8z@t6Lwc>@J4dt5iHS>9_1YvCkuK6GdgD| z8?)q;Gb_I&a?bB3D}?j{-ac7+WdCG2W`mO@raw*=mDI!`IUBKh;c&!jrjy0Fg3{L7 zXqP}w2T%N$3>G&ne*<%|#oJ2sX+DfAR{pqAF1;R0C3^cM)bA~&Q?T+g7!NC6RufDD z3FaGZ3b&UvreKzRcmp@82(=mzqr!${U0jYRrvzW@tf`1Kx0rRaHKvq91%7dOun9{j zE@4)ZDPH-^tPl^NWpXoRsT>4BgIa7;n7*Ml?8)V=d)*x3;TQrRCj~9pJ7B%s8UWUb zhypi-crYhcnFCTn)D7Z?KE6)a70axi9AXj;80`3jAC9Rk7OnFW-Oj0#qmk1f^?D&5 zk!XqC9ZGYPwH$g?=(P&WV#X0rJghQeh`lg(ydS}a`zih+!s~IzdJvZ4{-E$}j3TW? 
zlheH3RJ=&G+lty+TmPuzLUmWO8mxuH5n0!>&w=8H!{0+fo&tMxxg`fm(dx)Jxop~ODWrf7&?%jaofw| zVklv&V|j`GuuZgQ;#3hw=I)}JfPyrFCsusIqu>o@sdRlPGE%L%7=EBT9*?R^Bng8R zX(LFbx(QMXt}K`)PArb#k;o^|t9gStBCDtSiw5{C4n`HFNg43aa-4zX|9h7yIu;vN z2Fy8_!uB4s>QGl4e84<77i)*qx6CRTo2q={a}v&_g&N0-Q*C?0p(7@~MbGbZ@sJ+J z{jA4_Fe7@_Tzj?)o4>)dLoPA-e~X@pH7!i406-s{%}vJ5_Uz!$+&N?k0IP4{Nh8^C zoVJS4&Uejmo}-i6!f_JvN`wl7|Kd1pVZg!nH^)iTC;>{Ed9Vx{Ki@cxl^1aw1yKCX zaZ>Q(rPdC=iUCg%ZH(PSGiZ4;$5B_wi|`9)%eUcejItik)z54gPl-LTm=!#B@!fnyfPU}&f7T|X)AZk z$-pvGCqtyo!C6~!2qu@qDe+#fms$r|@?uk+StJvsHk`+_gZxf*RV|6UN}$w-ViMny zBv@2phuqE#Cvl3{(tMA>VD*ZSxhs$9ZHX{I+SSZb3w-ZnY z*pt;o?0P6LOPU(QE_@8fQW>g*k2YJXt)dj}5cxhn5)K91XRu!N8$OyE2nA%YJ?KB< zqg{jS8)*Fl9}d$&>-<(6vIB{8z~w!jxPjJ=ZxsQ236aMR7W8mVK|C>DYu&nB7`n+t zsqM`|1^_5Uv_Yv9K(jX*Dzm`SgIjc`b*OHy3|3IQO^L`ez}tfpeE-7Rz+jB%o*79m zgX!J0aGj(|5m3%xxd2)QS*yzBLrLwc7u=QwuB=qEuUCu_|5;2?C}MSc$#Gy=X6H5^ zFze!<#q1$jbLcS#+Cq@VC5U+pZVyVc&aLW^780;(2E+Q5_|w<1!Xs9yM@JSbvVWzA zaEKCxwmjbaTuK~<`R!aojqz=s0wExV!8O;zX-PDzDSe+VeuS z-QD+8+Ou41YZ4x0C+#_ECw^=T5pN_sQ*9Jl5Np_8?M zQa5da=pYx2f~++WcU%{z)=i^)lUo_y^8F>>v1nAOZi}P!yhNJe`%Jwznk;DMS;f-5 zNWt~oHZOrHsIjAENEUg!nK8Z8t3RCPii}M&uNJk}uTBya#s8(54H|*iDo9NccNcC`lDJN+ zMof$z(K64GXX$VcxE*fo;wJKeel&TB3us1b4HI8qSoeM8j-Fsal65OzVTxf{_K5tp zy0i$&f)!$`Bi{#U`O-HApKv))5yp(NuNYWX>t}?WU|5tBp2J}Uj77?P^w~L6q_P9X z2y~e5d_5%8A<&fgs#-<^Wq>60rR;6K3?)sX6?dy*ikQG} zo@cYADTYwr%(7}I{)(dtB}4WO(!#hC6cMXt*s#k9B@2qAqtblDC`u=tZ+J?hNET7T z=KOUFTH?%ve3Y<7oGgP2Tl+RaQM4!3C1-{ctR5Z4UCp6SXq%mvE{3rE3Y@XFQ3 zYwMv!oO4$iR!um+bqCl2t4=7IEB%Lv!NC5Gq@*Ghvfv}X?1(4mPUEih|JRPB{-S)f z%Z~g6PK<+CQW&hLgf^NhT4_rlqkp|JJE4~M#fjsFKwOrI^WjSWmc51zj58FqmF#v1 zglx471t%}JBRN1v!X8UCNFA`*I+`aFbMOL z`V1%R7z(Yzjs$p^&11xR=meaI6eSf9gCyD0Gf?+>M+dt;bOHn+FGLhjp#uSQ11~r5 z0{#qG7ngVYpZR?={w*UW#yX<)&-C34 z7z%Cne9TQQCi;@zqW{=r5U#Q_aa0pq!C*e3M+D*;Fd$9?BLsC(Zm%PGzthr%Y%m71 zTci%+nUEqV?V;{ySUcxM#=vq^L&FSK9s~_WT+SsJ#t{Z_u>icaAF-7|DGUa6$Y0c} z;VObe=nd9bq5K?=#5IdiSTM`c!w}Wlq=p2CTLdrh6{)7BJP+`AR#L~1fAHEn1^B3w!pm%n_~MQthU$jep@2Y=STb0Fs6Ny;ik0zd 
z1v`I%9;zgy`p)eh*9gKOgGrKSUS3Ql{?~J^DM1Yg-jXn97#f3t8Y#Qxtg}nh4zX1| zi7Sy{B*l)w%rYeaIw+9mKu#;{vf#)I;fu$v+vah>4bnXIKLs`Q4dW{>GkxheqVU{Lq+L`en_a{T#54 zWClHy6eDU`Yw#c>?V#%VjfdY55;{J@apIz)oSp1fz+xP57|bLP#OAU7>wNo!iDRhgv!tr zn8CAKZiY4B5ocUGNVxyJgNyHKerU`rdd>{f8KjOww5L0Z#8)LViGG|6R;|a9)x==TOwS05)U4-@MOfgm3M5-!c>V*B5dWv>xzJ{eG(7|m>L&t73(%iV6wp=p3m~aCaiEx~gCd>fB&HPMVKbdM7O!VGwfP6-1r1XzY%I|pv`4IIq(z5y4}JrruQdY_%kP1Txe}95g$g4pK>CVqzCl%28zV~AmdH;( zv!m$=f*Q`uC=r7N>;aO}h033*)BPJDJu?MG!A4)e0i;<#lU55SARnxx93SOJE3k?PGh z3vi6gv^9xcrcgD5-wA#VNV9^Y=(!2zCmKob!h;jJieO09UjfolJ^02V5<8J=K6y>> zL!X4kTkLniBMKWwZU&&Y8|9l|@48`~$h8CW>S}9xb|RNy{*-jz$kWvpn9(c}A40zR zj5?i|9WWBtPp08M>Q-Q+s6M4@85}S+7aP8g9IsZrUGQi^-7X+$CQbcM&HTp=o2ot3 zk<{P@WOK1K!;BA+-Y5+2v*zQq14wJ8E_n3&#fBynZK4VuAu;{k&;yKtq^ZUgn z^{D9C1&>-J3r02j)^!4&|Ij>j(4Jo2cxZqKi2Zw;4mit*y4?tr2#lFPMzv3A{ zQeWn97Cmq949TtmiEaBp9TbjWD<1p?$ciZ>hmGpBf>{-h zYr}T|X?cU{y;v0f5UB>9MbEbMt|^bmfdwP+?kI_#?Uo_w7eM;J5{fQf_d6g-2Bmv} z2k!zhn8Gka$$+gU8_c2<-vV zeB&Z|7M_2^i26;0nO{WD^YuWN><}3{H9|alcdo)66#PA>CnW4asAMdkzO` z`jsEBv;yz1D%bSRAi6xMdx4$m1*ki;V%60V)QrBm*8U3bajKq<7fz8+S+NnoRXr;jrJ@*V? 
zr3Di`G+AG>K+*Hg0$YgCL-*#QH_`J?u<|C(HX%v@gBbO(z&C1Z@Y>^W6FoD-bOs68 z+F&J4>;viYs|8MH@OuR7C0v7e4+ifna5{tE_)rQPH_`LRDd z#jwCC-$6y-pveOeJdWTa(XYK5l%gd1jxOW+gj%n-THnJPviE}1z*-XN#Z{KKpiuPMc_l8TDn zj}s5!E8?z;br0|E2)tZ-s-5~GCn3v$t8>NLZ7pZwQa8JWQP(S1>m!iy02%Zr#BxD4 zlx`2Q2VvX9I_CARS5CYUUg*LWmM{uo;j}+o9yla%N8WH*)fT#5Ia|iOV31re#oiAG ztLhaY!?usa^~!N1=^}i=V)4Jy4B&%(qBFx0+6Q&r1A~NrT(#|g)g*Xo1@-aDVE!h& z8erOqt#DlMNbV_4A_*Q0w9fY+D1tKDIx5Mj)gWM02O|?0#l{(|na_BY`5lMzlY(z7 zkW$Vsh2x^c;C;qL+NFkY4S_qbf2mC*CIHyCtB+>^mz)qSR8ZB9DS3lekJFgAZ$U!i zWHTRoeKi=~N=qAlimdRU%m1u|n)c#RXvdCeG6tBHpxc{VbZ*xLXgJj1e$~Q&x*SSa z&^*jR1eybA^dd_d+YM7viehvtiV}L~50pa(1bnO%qqedk6ytPlcU%&8;pGhOC@6N~ z0$Gr#fzyxS?Uc(c!VWOFu35lifabIaL2fo}Jj%~j>xEYd4xj3q{X# zko*XTwg0@poWYN5io#$ZQFDv3SvJpTr2~IZYIYGmEHFqtFw>(rJxo<)0gG5;hFpjDj<`AqA-2VYEK^{C!7CRLUBGH_#lmDUkINGWo(3;2qFr!e>c!;1 z;D+r-aLS*=y~N;#Z3PaoeGN`h!4P5?EB3lJqOY-&tECDFY*DoZClWI^6f(aumabKe*Ca=4Njb?z6}W>-`}|gC;Biu4b0F7VRH+r1kPem zhma~*kC-S1|3yd%p>*cOpM-P@GgqgBH6hUsWX6tc!$}jwcBR20q;hEO5>mp^iTYyj zUxbueSmaDFc$bg}A8K$Caf~yBu|m}_kL`eyU@z2q2LDA!NtaNd-yej8K${?caKPY# zZGFv`rQo5AA1?Gd-M;4~$kg$aOOLLm8xLEH5LUF;=b~2-XU+w)VK|bb>3`rF!>bhU zngSNtl(xT7+ zI3>3`If%3M8`mJrwFiryi@`3a+JbY+1*9AZ2+3SyplZSnS`8KX8yli$iBfC@sM_Ni zj}Dln;|+t?{Qe!BlustYk-;0T@di$Gcyd}8EPvn%RV%JRjzah|N|Ug$cl}J@G$ENk z?_WOyoaAR%6rBi(ILaK;`6$FDg^f@!JEaiFv7*hwc01#eVo4GjV{LaYxL<2D7Dq`W z0fmaRz@I73*JfHqNEc-0*$seau;I?Rr2cpvSlRp5?v@>JQdnK>0wu2f&h63#%K2-O z?N;u>I9VP=^We_QX|xC;=CvV)@M zzX&OT+NCZ1MM$YHhxV+Q*MzjWHfhBb#;E#{e-cuXKmI8q`cFcNqn(rH!MlVsuFYg^ zqy96vc5XOxDs*Y0>l8u#=Twcq2q}4?}GU zauB`mTwkUs|3KSC4K6-6P&F=kBxZq=xHl|>49-Q5khyX61)S#3vR5q^{SSl`Z^S=0 ze{9%(0w=Czit;ftgtTESYj@wR%$3ycVd!6k)Vh^=4F5?;0~?~}2~~r!6d{$eE!r~eHhs20?wg^dkvPVeBO-M7JYi}bdbfh7`y}}C4;m_6cF5bom#htMG3^(F{+4A$&V2JN= zvT%@#64VKI?O0e0E3u{Ed(=aT9=1u*CH!Nvp{P{#YSzvv$GNxvh+5@c?IN7up zH)2vV@mw&tQ8Zy?I)hYz?j008{{$=G`3$Bh!#{QA=mn0WzkO3{6V9<>hUpA0Zp415 z9T}=NHt!&3kP8N#mONe@inwejs@77064X_G(fLzHXsZ;I?|2p26-$w9-pqr|o3IhmlyF_3RZPAWYlcS(W+++&%k@tw#R 
zcLvM}gnBJmE0OtCL<2DN^7E5~CNTN_p0?M?P52*UXS&!*t^(m#I*ymvKkY2<|48#y zQFKePlZo%0951w5Eg^)U09yQ^)%d#;drI&iv~KnNb*pNq9T_+A^UJ=rL!9niqY}$o zOVX2{TUA4C%T?MfEZh4ecm(V*&qLqdKic`CEXll{M__#DtsCa&Osy~EV6YA4y}FlF zbe!&J_j&&2p-kSse<~h>>E5MIxoT+ju|ISlYFx2r=8CyhHPo_B3Gs^c+7GX3|JYkq zLvJzQD2IaFQQ@`cR`t*U$(3_S)snUma9)RbC{qTW0>oTd9Q?Xf2WdqPXJH zRW+1+GsF`^JkD0HJ@s{q;)=CLL6OCxu5JC?s#&zaE-ridl0~;LFNQKmFX~{?GVZ6B z%c;AU*ezrt#4C#uceM>AE-5^~CHa-)h}Y}B_6iEr7#>o~1B6oVbMclc^CowSldGf6G>hVi2sj8+d`$tYqKaw@V&2;X&xUsKvn$l5dR;G zwmC~_K5$+Qty0))Y*h_4i|YA)XHkKI!W#l0ZBo>wgarNzU8>*-7G$V4r3TjomA6+dlT6@gOLx5&3lD z30(Tx-D@9^645QY)!x_c-gibrc}&rdC9-*(RZ6FO?U*A+B$Kko=j?HK;bu7lTjVMM zvSv`+yp3!3wKvnG*<=+!T!P@auid@Wt0E=@+MoK`-E+bu1dNkaalsy{m%iFlnC>OI zLwG#aV0d8dYj>|r%F5L9zbyKl)vXjOBL0y@6~Nb~p-<}@J}z+y({5+nGL)^HIMM!d zU*}cWs+t;>j-;&~g&ZP)gdaj%40=NFlmCpI*T7M*9Ku!_d~v=bc}*Be8N=DFDC;^Y z4Xr1rq9XUoXeisC!zyN_0g{OWOE4;pR>feva?M56Oqz3sSl!rUy^pvI+$>EK==G1{ z>!bGddJAf+n#zYtm@j=~cO1Hv%s&5wzhyKmT93q4PdlRa4wOS`ANXc-AP^; zmLDxkjqg=44_S4mZC@&QyoNU6KAgLgcr%h)L9BD2`t~gEsynGThOY?PXSHu0okT~I zn<&KNP;H|VRk?u^N(sHqpmhFM-AVCtsYhyh9;$V8&QYiYhWDh<%NJT`O;+Oi$czl} z7JnFqIxM5pTCQ@#4Ql-B?j-5JKc(~hUq)&*UgdIjMp~2yX{BbQXa!0o|KbERcPHWB z>7BnA1qZ5Wc`1l~noy zMJlO8uAT;Jx;9xIWj-lp#5`Usl>{L`q}6wAD{yVHGfAc@)N4G-&}#IJ1_H5fSw3&% zuSRAg7C&!H^vZLER7#OvSQ+9`-*8xO33H5ZG{dY$Fp5ESI{s1LaQ7|=GBLWeylg*< zgOqw9JG7d9)mN_V?j`X!h2Egm#xp2>TVK0-(fOTC_?5mf4P}Q!+N1xY=t}pJpY~vP zX>aIeszr3cVV@le0$X@eq;EX+we^k8#-jcrH+am_9qd15;&pYhKO(XTAp%&)ZG|I4C5 zIvtFEe$oV%q1+}#W9o6;4AgXrp|oc<-RmqN#y|heqR49DqU#`bF#h>`lT6+ms$i=j zA~hj;B(Yd{&}8_Pl~no$u;1Fe$cxFiZ|C1SF_2wfI|<+3k00P}zjIhVko%a~zY`!t zXwaExaQ-8ZNQe!`8c`(#yK`p)s@j#?f*AC0k?n_DnW#_-`PQw@t z0=S?;beYW|$sVuoH_|4P;+Q1=mHsi`I2)5ta?Td{{a_IGbaanO^!P@haMi(LL*g$E z9x_`xf=aPSkHEzau3_I!smEm`y&v^ILaxX$Xx;JP8uo0HsJ!x+z9vr@s)K9TE2kU@ zOxVKe0%DDn%!Zw|!n!)U6+UwYM#%?#LwY!*2Fpd~&NKAT{@E=|7Y(VwI#uU~gi~7R za5F|?xDyW2;5$j=2l3KZWwCS!T3-~X%Hcz>Vm+X9W6lolFGIgg8A3__woHsldU$7#9U5WFv(oJ4W()Aum*L!?m=OR$d 
zbt2)FBgQGBt-{Dwy47oE2WP?RBhx|`4RDKSn2kr^ML`K)dRnJJ%L00 zg#C>#OGpH+c*Ypl7dK~5B*8xmi0q`t$8Zt3v1=@>bZY^5xygdCn?@A$Me(KUBiXl| z0g<9m{dm{UqY8|nepC8Z56GS2$6l#cQm|TI(588uFZlu`6UZ{Jq`<03`w?rz!AiFT zguAi#o$$fxJC}WQf~HcsA5LQo1c2V7pCT-#-n;I-Lg%G)1I6*9#OH(R&%o2+o7OPN zKH=b_Dq1uA#a9v~0ZLFrMI4}oqvUXexRHtzcP^S1>0!u02}t7O)-m{>p3+a^8JyJq z1J5k~RqI=l;7-T!DMr+-=GJMMDipNKUQQLba4lJ#IhW`w_>8}(#Qd$6*}|*eIZGWr zm2vdBWu?ccemg5`DU=0yGd%Ftp{PwY(1z6LZV=z z4&lgu6NCYVxhGdT>m6c{3y>9f;i8Nvk-zfni@28x)7-?`a68anFpFKBb}AVdfD>6- zs5|(g0erGmu{eCl-PNM!#YG06q)Ors0B5)VKa7d+IPxn z{qP|xShv6f4&W0Idy$L*avXpjzkI*}e1hyuj4cxD*9SECq*I)HI$t#p=mEQ{Mg)x5 zpDELdh~oIFE&jktre(4`M`>ZNUS^*xV_M`VLP;1fInn+lo&vQ5Vx?-4Btc)o`HF~> zB#{8RAbMC1ZW->RB(W5aCMQzpjeu5qCcz`ehbjeFroIz4EJ1d^D02Lc&xP*OL*9R? zjtPG;)Y%*iEb@4PnA`A^p6Ehu90Vs|Dz!r6WIC@S;yc-y{loe2`F;iZP02A>zd8>T z948Q}U7!Z<6q@^uSnbnq2Pz-lk3!{815Gw8n3fwzrzD{w!Dnc4oXg3sNY!;)c;WSn zmLIFp${&zV9oN{BIfheZhI%N9zNhHR16)7>6Ve<4^eo+uXLN-NvfDZTk5DP;Ha%Hp-;C`B-~cfCpbGV`d*^{_D24D zaGrdPGJ6r+_zHTD&}c^N4Xs{I8gySr9a4q@Cu%Gp=ZgdiE9*enIOO5+xI%DTzqCXt z=Q)TOCjes>$n3rreJBC@rGe|6FGeAWJ2k0@*9YwW*$ox(l=kHo9C)biewlzZ+ zKWQ>KXY^g-kUNopoY4a%<63_7)8#EN?p4>chPX1L!TOcxqb3Smb_C|FL9a(u)wF6N zDw~qu%eLsMX#+C;e!RFRGeC#R@Q|fRKCKUIEZRA^pzM=YOcUbO3z;p>%%IBHIy_dJRv=Wyut=;OnRXw#5&Klw?mURgibyyh2Plfmk0f1vD`mh%`Ht5Se2orExuQ zA*bLt#Q|DHy;E`hutQXtMP!COi<<;F7fNk5;sVst@}LQHV?NCC4ldYhEPi!2R%d@6(PyC z{t6V!U}bkZUB%A~&&JDg7H!0t@}RO#SMjs-(Vff4_O;4b^QlwrU(JVN@iX~&RIFJW914Q@;LMikJ&$f8_T<^Y&Se!t&12waqdg~!*ZHCNO1pvZ zAsw|7Nv%-1u|eb}?H&pY#2gsU$4eA}ov^fqe;4xQq#KG1I-(C!=H-)O7wFfu@sF1V zJx!q{wJhza+Tjio+w`8!TmfjQ|EH)I)TE&0;bBUXs{R8McK?I;rW7T_lWLg$Tk}i* zlggF4MryhBCmv?)zm&Nge9sS8xhz$1ABWnl_x@YOew-i15#?c+FYvIa^Zd&}+7l|> z87-WhkKGlcBKaRYj_*eS8nAz`1s#;+$%aG;Hq=;-3)6eI@8}jVu(T#Q1y%5m zlh{0+KHs3MzbWbfBuCna8;$Coz9CgwKMu$JhT6}E8`k-cY1QyPFvZ@{?r(TYrCdo( z;RjSBRUPDH;z!~6AV$rl5*17fF=jrBa!s1;r)@R=R6!8pQdHQx-J&0hBh2S#J#~`m zlC75qQCTr=>zK-m1h=;P50(2QH3{!nBDyHVR2eMmQ6l5s$-!5T 
z;wxE$`;}XJoR%0*j4Wt(v@w%(d!_QdJA4iKO;!Zhf=WR2C(n&gAkrEg`^zag&ZF*Eg^2n0OhoFKr1*Xanc1u>8Jea7IWNo6bvSc4{{bAETp6e_L)j+&%Q(*Gnr`ycesFVI&_XwfWf0;8u*!HEa|q zlm!HK-+ugb>1p-!0jl7@88X-%B84?ZX@i<_4Bmd(w~ij1x)O0g>T`%$n+OIKm12WF zf~7mrXypDKeI~1xm_8B9rkvI>g<~k$TYrFIOl;Cw4i=58$8v7{CZnWxl_2u~$U!`g zX9&h2{7Q5grg?}tvYa2^%Q%0DP3$7m6;nY)WdsD4TlwFu7_g6VzF^j{F=cDNIEGzU zb0DC^I2)$+x}97$6_%^Vzyb!(oH}xRpmi!w%25y{A(Xby>hnJi#poqyV*Zhw;_!p_ zrT|SYoa7~zjx6(|R7dowJl>6Ob-w2?mK{r8Db19g_KOFnHTy+WPVh)~80g2HB3QaH zmL*3UCsZSj zdu#OB(sZ=r5%lSr(EO z)*~A*#25zRo47E|bby}jd@}cxBAVBrS1upI6w&6#2YS8+)$elSzL#Y+aRD$r!~_4~ zBCF%TS90iJNReR^k+csqy<;r-PCV#obgcmGSB-j`rg6~vQ3JGw!65d`^V~IS z^pm=^uuSc;#s9riY#3s- zDPKM}B~9zGqCY@+h&8lF+)1~df4Ab4q##oovY5;lopQCmz zd%BQ~>O$>1+S@if#d3YJJ3|6*_C^&o<8qha9Y`)a$!;!n+Cqxnlew0b!QItXgzBUd z7R>MM9bUC{1FQtfe}5ky!S4hyIzWg79el$Z&=+gxB~(BA7G@-72Ga`+j_;(2PMpOx?IO(RS+g;KVeny%asMhau01w|Tz{17G<1GmFS-icB`X(qzmvd8fMe^PK`?FCJk(?D$;9 zp$op3N8{`3B8Q9B@P8kOY=o4*=JcZTI9_%{I%6W=OO4VyDyzpI!HMdOx+upeWeae* zkdhq>GcH5&4Qrpn$ZTNe3sP`RMH-LByL3r*Ew(Y*;O5s03JQ5lNv@#{97h&u9?L9= zl~lLIwCTU<4q?nX-t3p&IGz0|AQ$kN zV;)I~IqKbnPeuQ2^Dn~)v6SVF{CHzL2~eja4Bmyq87U*v4Gk5CA?>g}(9(HCJNfRE z_DKpeVk*+-jKaa=Ja98IBJeqC*Z64Fxn{0+xyfGlRGpLXzdaIBbH*y69I=4`)p*ItH%5g~iZ)#@2Gk|nDP7z|8sQ5FxOY4XIGAnVy`CH}U5uWn@XJFhzK7&w zr!^XZI1J^BRKgsoutQkMl=l&0j69tCQR{GWozQ+|cGJqjdT@$!1%5>HD;`G<8-Q~il++I<$|jQy=U;4ii(n_SpV9L_qt5hh#}i6w^3qxg-f zoNQUBa{#Ao*;7LLQAj?@(Bi?bhe2wX`$@@gAwi&aAs=u7P5^g6!`1Q$Cja~;i?3iZ z0&8)6(HvtwI^Ot~uv@un`6a9WOBP=?9bu_%9D~^(ZlL9~Jz`(GfDhA&HS!xf1+#@T z1fwM~+Sv#jn6sH*5rb0F*vTo*#Tr28#^R~-6cOhtECl!qw#x&`;`@Ql;8A!mvaFjP z5gyTw_eyI|j&yl2@WShCOrdpe7#w2A=WhOIpJL!{^zY5FssITt3Prqg1!s~N?6fh| z9k`6?FHnXjOY@7gW{_LqJj-+?dpG0F8r1g7a<9w;)}>{xy4SN@&N5wjE=#B6kZ}co zk)dUkd(65Uv9xBS<&vjmkR|o-RifOw8fyvwm9|L+lHnvo%9?&8BxU6#$H8A|+1J%f zRhS8%$tObgE5K(Gc|gqMe#z9~2V)rL2Lo?f+;=$|0EQXq4V!4SuRQ_ntawGHb$t1r zNvw))xdjxTur|>VEiU9{bu8IlFGmBw2n-WqM#BWm>gnmd1@iPq1HjM+`|0Tr981RQ`xnU(Pl*S)hC`XGdTks%^?BvIoCCp0C73=)#by(?iGmJLoA 
z-Le93mAPM_2`aF_eEj!0UzWq>g^d05e`_N`n-UKa+6pAXc?_O2eOBlgPmQbu8TL-q zwJRfWpRLBpegeo5V$3f-=Y5faru-S)OP63M;QVv3u8qk92eKXzK4!bp8llpL&f_IW zFI=tF#(5lD7KP+_paW+HV08M6F#IONPmEIlsv$m*LRF)7Bq6@qq_#UHUJkf1k4K%O z1J$Q&9mGuGg)g;4bjTJtR+u7}38Rfkl&PX+21H|UM+f%4ewOnZIdDX7a6s=PCcp>J z(L=YhNRiK_tUFtW>?;5sMHV=<%u~~3!WKCQGRs$TZSdr%MyhyQ^D*8&hO-L?CbH1G z2P`$Ealz-a_mw7q!8*5)j+jaWflJ}-i$%yyaeyQ&??MNcn@Y`Fo;g6j3!6sWlz46{ zj2=NAg|Q3oFw5bdo17pTH;A?$^;yJs(x0e1E^COn6WNd0_@p%Zt@_t?N*xs$6#2;O z#7%jH@03?yXCdH&^gf9oCO8gazL3t!Lx3isQJtKS9bz^tpd4_sSx6ybQPDD|izfdT zA1PwA6d;lkmHc-qRJ1U}-&f4is&oijAH*&ASOi_)gHtNX zU*oeX6{^cpunl%kU9t{f3$=A6{o|tGQR(y3jknz!h|KZrobVsUMi0o|lP2NnZa@7+t0bCyH-)QRp8eT#{u z8wL|I8ec4OtgR=>b<7t>rF?=<6X)y7r`VY(gMrq8rUn8R0$IK8DEk?mlxrcNm6oqE z+96=b;K^Jf8q_|DuKv+oQ*Mfpffzh9Q0nVDyD#=sYo5$gqmMs0N!A0}jJ$w;Ft1^WFL41aGF*af9P#BPlYU?QrO%n2XVIFX+c z^4gop3dH#lVjcRy4EYi$5Y+5uzO@a--A}yK+bzd_!j)qkct_#?J*$O-ZP5F4EUQ4qWWPS7$g{53mO?t*{}oFLw5sX1txBy zId3>ieSR0|C&*46a=$e1*vmkY)5*6a*#k+5`s1$3c!Cy(@b%tB-5mt~K~nWSn)&6< zM!}}<=R_{bz=^j++dMUi=vfiR9L?ddi1$_*I+dcsfP<#MLomx4cD}htZu1!f06f8WLd?+V@zr%W+g+FFr;j{3eYApudq5WGJTHA<^hN3+ zWs721d|U_WDD1`90f?Cmm?|3ehh|f^6f>!xxW0LgFNCU2(|+Gc#;E8Das8kziIHM# zGz<(m@98H^8T%#?YqNsRyfCTv*cva0xL0`kOQ=ShE^;eY4g8{|BZ#i7{g^AF|9)8! 
zeY<1T$13tE9NqOzP!uh98GyMkK!DQ)RG>zalL}D>Us)AK6zg^bx71a2j*zJe1-ZTp z#1v4npSDE~BAi8oo=?qjW?jlXpOc4_vSScSZtLf2ryu0&)Tcl2!l=vh2bC z!wJsQgvR6+^fCjT4eB;)1{s-~Y}DfkcAGf%yM8A!#UKF^I>B`Fg<# zjuH_k$moZbUpop@zykD${lW?DbGj?NoLS@-PDnTk^)+$sN9d9d^!Z8M-rsTzX_HLv4&TBv7L~JJ3`HL zY(_5bs>Y#~;pwKNK307`uo(l{-rF0RXkxjJxAxd^sGwO}5`9OFlwSEr~Em=CdVAZ&2-QgbCuB^$CDP@cZ;!^d}`P045hvHK~g1|wo!S;59N zc$_ERN&Z~j00E%2Ebp=eb|fGy&30(MpjtG3+YrmjEVWQA>=*UD{2gbg5bIENXQ##0 z4}2XXnOf&ElV7Dg0jRERi;{j2Z#xEu-{y^RlZY4KgmS3qRs}S1#3UN5S8Bo>qyxO% zXHMLytt!$1ge3+7cSCFd2EXyfcky#^hIE5{48~17J`pO~L7b7M9qsO%7q^kG(a0*s zbOSLd1XvOHe9+**T2RYdDKgnzN7aFE`(lkQvLxp2j&Fd zq049TFa1sO4jviexy9XcBiNI1AqjnQict((7SHejAhRUK&|_aN#$rE9pOsz7o2}{>*eV6%6FI@7 zF`mVbKEyI~3R$Z*UovO$W9n=ZAq(-(%vt=H1RS_fA#QemJ&$ZEwVmM^hWKaZ4`X?gWp4+qRsy&Ho?Q2zjRn zRazzj6|>?EBPv<}Ql6IMi6BgpLyJ{L3{Z-H#C1fD2y9lAh{4V)lmXB*rbBKRoyrdQ z8xf3$;9{_NBJ#Kt5um`rJ}+w5)@s5hAX&@+FjFx!d_`Rk=IQdKY((&7Eppz;l-rVl zlot}V0B_U@0$dfZE5%%T83*rnCkB>i#T$dB&BE`f@?bA0MC~*TPO3!u!A^w@pf^pstFWkUo2;_NubDhnGvQk z(kfy0-bV6-Up|$S+LKiqx-{l*E!~=f59yM`Ow<#8DvVNNA5|>Y(vtoBWo#)>EdG{% zNZCYobF7}x!^-x@W3w-PYHY{?fb_Nm6W&XvLsTNwWh3Rz$3xb72xS39KZT=Wvvr67 zKj5LP7I@F`XM2wyNJwM#1n7+c@7g4C61Jd}WkgsiV0`$gL01#ag5dnVV5HM;F}}Q8 zP^$XGKuE`TLUhoF)9m4nJ!#`NAvS7Sq)$Z{Bn&LfqnBA&H-Y&cc$knTmy@0ve+0Qv zf>%KYf*8SvoFO!TrSf@2$1ZYw*_x{Nhm*-^ag^_=Zy0~ZI}yb+!L2_02rLdT6ZA}; zvh7*pEqo_pMl7sn-3S55?1Y>jA69zxtkV4atoZwAk+~?sGI7Gna&_{o1k?b}winl} z0*Yi&aQ47WS>{;Ul9b#@B`MWYcKagKZ&8@u(NmFc)g}u9{Ltr2sNc-s>ZJe@+#I>Z z7C;f1y+t!Mxf__W;nu{UbT45l#G*Rjh(2VIH7X=eEr@r{xzn|HsOsAHM_(D9HJGuc zzK4TZXI?->b&paS$_6m)z&Cbam|mgQU=I2+9z>VLdttM-P>8sQ=`QkI-9>d()^Dr^ zb!p<^F1Qc3k=t&s{RhMXDqLM!NiEFDP%Eg7g_k%}M3qL12o@$^FebrN=oy^IwxNHu zfux!R$~wg9IHCMsaG{dvUE#nBn-h+6Zuw8@nF0qi58{a=c)DPmMPhNdWhl_n0LGC^ z#27Yo2!V7=k(L16f|k)Gzu42XF(OIX zv2!_c>-&@q1FxbOsK{|3Dj(7ldCWX48@0@>LQY)IOpfY8qSER!)_Xr7d$as+_QPQu zLRCX1U^4KDH>t=kLNRFrj?C3<@P}DwtFF-v4MQS3ElW^~*gt@rl>?!5PAejKnHvm! 
zl%SUWBW1>klu#12p~xg6SO^@)2{kP%r!ajZiJLSgk}O>qG`XR5w^(zWwMO6-jTKso z5iTEsGa*}J=S3{Bc9_PSo|H|FU5*`;J?-*Ai+Jq8o#4#(Z9zYwal->%5>`kb6at0> zlOc;;K6}h0)yFdy7iNss@dx87mzdHr(7*hj@<7tJItJE$4y=Tqs7d5C+dJ~b^yqRb zlYTTb+I|kTC2ul=4>$^J89eIprE)=olS^XMv;7=ESU5+kjf~}k&+=Hyhp$u=ap8XH zU#6KkwU=qF>* z27Fwgubx?|JI^fHN1JglQ3ea*JhRd(rLFF#_W#>Qu+yNsAa+097LkN*%a?<_tjkkf z;~jjF_60V!r{S-z0I($jR}=bB&L9!bP>OtQ+e%6u_?XOAeW9GS#V55Db$W7JRJ8y%vAsu2fa?vm)@KjE+Ij$6Ez zjAuXHzVDcXtC(>h-fs`Ca=86=$DB_-?A#D?V7|R@7bFM79L57T02|Z_iw=>j@RU5g zG*sdac{CjfSr@T0UBHGQ2u}FS_S+vJ5mo~-4oTdx`0I2*n`SEV^zCK-)cFhkBzq{Q zd>k0t;4{5-d$yvuq+Q(%Mkd~y<0{GtV~Rq{%{6xVhD2;W1}aKTaet7}He_ItM8?o_ z4!CDCLAm$MxkfnbaRr$feSmF0E>=fPrh3ScQ?1!?#y5l*_*?RnCE5YRFvKF^hU_`a zHSWwBXpe2J((JXyQU5iHSeEQI_MSgq1Ow^a$o;_~AeURv_ zQ%i9?;&yuoK0>srSfnCGIqfBx0(!F)G~_Y+_mv%kVNv8aK^q+ZKRz+?5pf@8V{e2Q zWk@yxX7=NgL+P26Y8P;@a*lFN0a#E|3&sj!BstOafGWWlQISeB$Qg%wB4Pm|l6B0Z zaH4(?-T2}RKyWO24m;il=VShUkr)>*v5?@wFw3Gr%lt@kLiJiHijbW31ZWjTeZJva+UgkbC@3aR)2q#(rK=$TtvJe9DU<6X9)lbuq7#x2o6Lblu` zs_coBy_)9zmeLTnyBPMtQ8X?rPPJ}%eK0h?{Vq|l(Q+VudN2f@9!xe?-6ekM)0ge; z>M<^%^}+Hi*JMFOMWN-5YFSxkw0*Ans84dAsZV7UHTT(SC z>3LF$oUl_se5YV-_)Ljb(x!O^4>u4ei7njNw!4Rr16PEN9xz#+Ir~{=Ix80gAFYm~ zdUhZ@=4emZg}wzhWOlHxA=9*}fd{wdfm=@BXn(i$1`Gtcu^y|_86XEO9d&l#u);xJ zInA771Sx1s7@J%zM6x(SOlUX+I%Je={KV~qW{Jjx*9i+}2Xh=ay|5N#%zE&b+{OGw zNb;8iUXob6Yc~}syD^<83mYxlo6ZytGK?8hn1^QuKq|zz3Q1P?1Gxl995ZiDt1jQ< z&*T+@i<5&ykXquHZ+{~%=}10xh~-WCjr?45gsmN7R)F;1A6xIRtT*9TEl8yS|3Lnk z!`+*rakEHNjm_ljbqM}`io;vGPq6(l-Rx+vBZKrwm@g1w*0z#(@E*?tC!`){RrtkI z$CigFg&)3g3-OoC82rUq%U_O;nG}^z^;g8q@B^r0{?Z>$Rb)`uo5cjNW^bQjElQ>2 z*j8^>ie|@FH>w568YBVf2y0Jr3LO2GJPtGoaYlo?lT|1SQ8srBzO0P5&*YDT+eJ*` zQbn>Vzo{-XA$x5)!g|O9il3DRQn8_c(l~o?EZ8q09-2?3mCLT?ZVbXA7vj=jfs$hY zmtTmF;%Bip02IfD%4(}stXXX7VC*(?a}+;g&Euya?Ua>NCL1j3%sCkpz8X39Dg8)a zvr@?vU(qZs>a?C}6)ws_UIU1lQbe$cdkTCuS|p%dO0pCtz!S&dDyru>>f5#PFENhSB!nT_RZ^OB6=#f3!%2? 
zjb&P+m?*?B7p^M2@=6?1|FSr_96S;CdP_hS#&n1;iZbOCp(4f*5s0Gn7)qE|-^ZJv ziWIJZBm_1R ze8uUbk^y?v>&X>pDl(n@lw^X>aA;=mL!qIb!aNe@Z5|ElDJ@2HG(rY>3^6Tgs+B!S z{{kpQtic~Wi`QuCI^JfW@O*5g0`{p^KDLz5=Ijr#`=;PqY{{N#1>h1-rKx7SVb{%Q z2!H?ERBM`IMkDqYVox=>eqT+sh!bFFvv3VrrW)vg5KTn%?(>=?H7Aa&%-qXK-sd$@ zo>*idUM89G(k~t~4Jx}sB^&DUWe`Sl6*aSUSlPsK>JS_f>e_<4* zT1(5~XU0Ktg8%$ZQiDJ*zt@ZspA};Bvxp^Ngqq?QPOimt%&gU#^@&mm@W$4Kk$rX^ zba@ec%ved>KNyk$gfB@6{Z`rV5PGP!9DjTpVsK%_QY$E<%uf1R;;Sk|?Oo-wW}mYu zg@jvdkgWaM9`8v1veK&E917kMK4w1RdM_x}vGuF|4d2{w6Yve@4+cfHYGF%IA>zhS zu4a!G?mdjyh}e%FT|0mqg2O16Gcw<9B$21C`iFpIjn#mOaPV}PP^b< z(hZyOzUieQ%jzWyK^?<+%U^JDQjJ5b^j%~d!2N{L35O2G!X1HvLrEQra3MV1#O@_~ zZW-}7@J$`Fu7G#&W;X&J47&?CyU1H4b1m)`$~E1T3tq`Z$etm?AW?1yBzh9EkM4$9cF z6;ZZIRp)9eDwY5rsG|n+uG_MElwP!j@PlE3y;$J*3w$bNpt8r*B{~`4K-N5|vH-+V znZS#ptta~e%*j2*0SBl`))M+MXx)iZKzD=q=*3f7ptSDB8B!3#yJ!?VK+zD-BPO+j ztxLt$tRo(1uZ z&4h8|+%|W*4u_#*W31;=#n2X!AI;t5NCD(DMV2~G40T9b;X+unUj?;-7ZS%CUrPU+5Z^qBZCcRfuR~?}o(PcvbM6dv+4k-iWbX2S-hcrHB z+eHartcRS&@7Ieodh#9A!5{nqZnato-~brDVS&qYnABggehDg5$}}k_K4AcvW48pz zs1(>%%HxLdN$P|UADzBHb~-E+xjSm15*n=*igq4jTyL>SG!{3~-PIRaC2%=jjxhdsVYntf)O)wF+AWVVuK^EHqVJ3%h#w> z_>&~YcmVYt7bd$gXVGvv1^03a1hxlK+$*%cD&3rPdcN$`E0bhyY8P3it?i zqsKxFmo?;HO$wSdLV{H)^_kwOL=-5r9vL21BU9ECz{L`oKwtJ@s*zFjE~YV|?P^sr zK%O5C^9mo(PYf9EggMoz8~bayXvquxkjgUL921SE;VU<<`+1ur)ia537>Syrx;Y9+ zA&hx2*_3L?$R;8!MzBss1E}5CzUz};#mf`$NBWC5cT=&HmFMY_2cWIa9&~#^?B%VA zmR44+T_tMzE>${8RgBaPB~7uYaV84o$dieg^Apw@nM4r-NQBrr-$@;#AC(4%f2XeV zIKCO`SGj9I)<8@vgP6h33Vl1Nn{FqeVpqJ*Y)mvwsYUm|EJ>|OozH|7dKk{ zr(ottNqPBfNp(_dTPc(dTGk5KJWT9*cRzQ4d!8_0L}q`EfLmdve!c_yF`=?D;f$@xK+Iu3kH3z!>r z)wsu;)D%Kz{@(~~$|kN&;*CWZtIQcFDyvMU2}x;13Q8g!G4Lz<673Rpqz~C!JG`qk+N^K$uB|ZA;fBv`e3jQ9idTdnjG#AWQb+^<8gcVL1tMN+DRBn^*f8((nLSD zjO>J6XoumT4trxOP#-A>C-;N&rz~W&hUqNBIbsmpLLnl+KRZPOM-mBZ9aG9QuQ?|z zC|s_H08FN6CspkMeMHG$G-Vv)+PEU?(rYQy0u<%lT|H6)5Mt&KX_%^|WSHYTB@w_r z&8ondg=&TNj&}jjB@Bfge?Ekh;a>5Zj~E!n$au@Tb(&#zCbK3QcgwRo_Cc82=W?tM 
zFea&LRZokYeTt{WjS0Ltg2eeuh*W^QPW`YsZRv7207=Yli+Wa^?0hDCg`R^GPeoH? zEE6<~!>KwXg}j%l^O^9a&jEWct7WBvru-Z@i?obQKrWL!RMs22xgJ@U_$i{#xKIZU zJlSyP4xrb_-JnYxxPc6Ip$;B`LmaJ=bFnNI#Ov~14`pXVMpiJ+lzE{Y)=J%6!WFv^ z&|&VU*cPAo(aVud(#s2_i~)`9hy&{u8_5>7SdyS6Q6zP2>p zdmD53xpq+0{dYim9xGUsA0d(K)9xt|dE?StQfwB(EjNM;*48NlS3d^(3#fOwf=C^V zW1n1SM-B;EQS{0i1;rC;Qe??%mD}a%0A7_ci}s9wk%7G{p`oxs{CTIRTNYkPW^Tkr zfR_tB1SeZWqTmK-(FDLUQzL=ClWfL~bndb@GSx}&X!)`(keUx52T-pAzf^#EG`z`YXd49KV+7(O@w>E%@D;GA zSQ2v{CQ0us09>@IK*v&{3`v%iB6+d@0ls8kl1B@Fr|cT4%c&wsQkl`q>H!ZpHDszd zsptWQPH$o`J$Gp+Ja>pgogIuB#0}Z4e9KzK;e!>qUD(>n0oyaDI>qvL`2KvM9g_<$-%nc6iLUaTGAX}xkn{s&q*Z=F?rV6fd`+i8~{qRzF}nzEvz2%vr_;Vc~BgtItrobWye zx{Jc8ovQ<(j|-hCK!FC>DipX%aQPd8(>W4NGS#QkDC;)lA;D_=l&tZ zH#-HBqJAW^|0BW`!Kq$v)h^w zwm^v4-lOrR!ORWx#eyP(D$Fo{PW zCGUyKG;K!L{iv6H*9HmMo!r~1=7iLeagPkSM5p7cl9Pd5PWA=tgWR;V65=yy8Mb96 zYS+;`VH1!6lm27MIUi1ep_~Y0VI1OE?WYjoKeb9yglOR4hL#;U?0np-*{ceqE0LP$?p_3pR^)t7ZldLoo z={rnT(1!S8)D311ZG!XzBsXQME5R&%mgY?Z4XwbBhM);|)uWArrwer+C`KY&Qif#v zvXRZ}pcQW?XXMEi9rUS<(Z$;wa%G?01~aBy!0xpCJhIzhny2HEBuGIIXbu=A-HL%9 z*Noa|3A(>LU>J40w6pD08grJbW(-rK_*!;%R8b{!sC5~iDn=UcOA3=wod6WFN#6(^z=VjKo_~QFq1~Y0q(q05 zgdCk!oe0rE4vgB)*${d|2eF}?%499+Y>e0r<_(<-$$kB8Lnmsk;2>z{UOq$(LF_;> zTBvbl!FxYaax+#Rb!Lvyf~Vcln&<=zsyigG1L(`ff zQiwB_s{+<=U;+br#=%mIrGuri(dgVDRS!EVXU;mB%A|@N96*{ zqt`EzS14nQMW#v5vKT;&kx3|8-GtIvvZeZGx+EkVM*}(m!(*Mot)MY+U-%{xfSluU zs8?lS`l92~{*yA+)6+g2vsyLZDnUYlDZs~Q{b~{uw{W(o*BlFVp6<~9W&)UyED$m) zGq87@dRVq)lc9zYp*3#fL`GWL;I!uhd35%R@@Lx6X78dlR29{NF9{)pmHCT{Iw~!8 z1Bji`h9I&c1^G(s4|r6*#|6k(w!l;NJFcdHrVtuHV|mB1oNy!gF>|NJ*q~X}N|3FP zM*xFXWV`tXPkRI&N;Rr96A=<1V!$7Oj9s07utu;l%5whld7j{KOxir$hpH5346-jW zFwB?QVrODP>Uz#XH&Pyx&O-+@F6~kDxfTP1;0T4$fcs?EBz~PSSJ(AZP~o6w5Q_-h zJE6Ix7EuNsksu&7RrRhM@q*eYkMG__gf`8~;A$1Qo9C0TmAM(8Dsn)q;))yFPaB*| z8W}c%Y*DJ-al+i?$BuV{^KL`{tPd+4LGF&D!Pv=q0H%s_2dipx{hB~zrWUMR3<;lP zv0|4{j05tIQDJd(CTva^8YnD#Z~iM-E8>n!lx~1LYsjib@DW<+QI%~AA?^p#o+U<> z)7g)mxHwLx5EE@ndmLoiNn^d8IWlxkQiYXq! 
zqca0-#_~!h{luin!LlSgjF=d^E6ymi3jRGEI#h!K%MiCE6Hp#0-dNQ14OoF}@JOXE z_U;->A~pqMmBz}~mpSeKwhXR%~7_x52T6-b~jL^MXD{Y65=75lp~gl(<+YpBAxVm z-1ZtWrb~M)14SspcbR&b3iJz(kHm>lgbEec>e4qzcU2k$lzU<$>}1;CI@59vFG>h^GzO*{ug-!F+sHu zn+$hPTSWaC4(pNWm@HMh*9(s7pZJ8&T63vRa0+97Ga9nzVYDD+Cte%?2&B_fSHR0- zfJUB}k(xJjGUdg=cW-IAn5j+6%9FLW#Kp+P{o))jkUvk|ut96;M$UBz0e3?=x2q){ zZR;!dq>c<`uo59q@_!FKGmno#XFh$+EQka!zwF8C~jRAs7H59G9|?3XEfrI%=LKXPQ-9<;^ALV6BL-!0_eo3d`-)& z2Hh@qFz8Up-3m+sCzh^-9Rgxy#Vh_YIghE7gz6IS6a?ojpJv6+Xw+aRsgM){0B^PA ziKpP)a=lCJ3Gb?Xk@E&abz!I)llxGt5OEXSlVAbTmh+dcV%%WR(V@{UalwcOgcek7 zQmNe?O+c&2BTg%bXZD>gngJCr!a0DJQ&dD-2*nd1Q@9oR z;c3`n{sA%vE>im#I}TfQ<{wD`@%f=0dm=t37(nuZCn=t#F;)Xz;F+nlr;r5QTrNm< zZ~X0yF({7^tJlpa;sAyk-`qQU8s%9k)Bu}Oh#4@(4ekDy);o2KB5K65AA9RtM?mmY zN7@K0FApB(D1h^8#>MSHE27JM!lb?`Pj=e`ZMvj;+ zM2HK18?N9L<9;LkCAvLj88aztLWoHX8+Z@YGnSle3ow?@O65TaZjrnb5Tmm=SWc@* zx5e2n6JUDV}<3KvpJHL+UHLBY{ zJ&(;stQ#85x((EGhdOnPa{nNxyLYcU)TKXETz*jJJk3uX>Ov?&sWXGGXD%J0HbJqd zYGsx`TP99GX5kC1dNO`;IFMbVN#Z z+BaE|7`m6EJj`WU7C&zr1FAgmX6BMv92S6cZotn@Rn%I+%l7G9GA>GA{dwqm0W?a( z8M!A#YS?(T1yJ_LlR$hJB!1XR4->S2wv(AdFX!F3A5Mq281QjydezJ7#TI~gmDqbiBc;?OMY7ALx+yx zI%_9_(v2#^xLJq=PImZxah#xKD+?(Tu05o-4ODmx2iYPdxY&|NocMlG)F=NMHGiN6qtcQH*?Ec3FDm@gKMdl4vHUt1oAZzW0-=tjFAG)&p$5R zLa9e~mmmiNx)RTQCQmOLS&wt+Sh|82+oX2Lq0etJ-XyUvh5-@n4`i?3^qH1Z-0o@> ze)>_H;&?H4%He=`oV8^=#7Im(6H*F#dL>rBMq*ZTlqoDKEAc$eM{PK7bqFUZ&PuGS z$CkB{>nte4?EK@xSU|5uQ2cBaer(BHJ+v(FU`m;wSFbH=zDHulIY%W@v#-R?&2Z^KNyjI*uFwP8}eW9`N=&XR4>H83|V#OpJN z`E&ZY#zdC6BP*Q-WMq(aZ=RKSnE;z;~xB-WDZSvUKAl`oB;RZq;tv&^fhBZSit3@7cAY`ug zPtoa!e-tGTwg04{-#$6lV-jNn#(;SE$$ivkzIMiLy$@vDu5F0@)7D3S8oHG{WwOHg zr?HQ&9zEQR48A2d%7DLj#QFO+LV^g?$zu7vp|+}$LQP&Zg3)6gBo z=6m}I5V=}~pN8%@4IK#&S%YN-77{>h@Q0kl+dKFp-|-7~h!mA?S1$|5BGB8sgK_jO zC4sSQ7AY)Z0zgIEyS~%gM_C|Lw}MySmeXyMigoL9*e1+hvbw*{ZJFr4(@}Ry@Fj<$ zZsE8+px|hAxuPa{XS@YMMuJ^0-@&&J>Pnfj#%c8WRck>-9VE74l=KjeGI@WeFt!@qIs*()( z3RFwIOB#LfQ%wx2rQh-|YU2JW_ya7K`i?lL{U<)rgz2NdLJjSoQXX5Vmi#pIk;}$z9b3z_3P1IcV6JYFf9b|MIdy7vTZQ+J 
z#0Z2ZYKzvFc^507Z1BuILFlA)!Mlh3(|Rvf#X*0LzvNm5Mk%PH{;U>;R|Po^E~SQCo`E(Rln(6Ec40apO}Ka(nqv%rX+g4@pg}oD<8Bn*WiQF=`f-1%Lig zQ#ukeYe6glL0yR@bw*;=VEj^7X}r^qW{81UD>;x?d?YKem}w;D>Vzv@T~BcOalM!e zlTzZUt$A+gMazi=zAB@{Oss()yTRB7A4{sam3Wy!E&r5gxmNm?v881OwW51F)}}w- zc9}sf(v+2vdvhUPW>D)cVLPC{@=nI~+EY>=$eM;W2DTghj|K$$%BNJ} zB|k1v|QCj5#iWJQkzYeQ3NY}Hb3tj!NM^Raz50jbF>1-(`T>@lieQexOXe|@~hzX zdO7{}Ksa^_Kzq7jRv7zez|VG>_2;0;$fHHGnK9nx9sH4Z%q180q=avN*Poj=%+H5S zy?m`dr_Y*`|JR?htb}Rf zF=^lSfE%)Urtfv`hp4Onz5+qk={kipxSodJNx1a6LZLd=b==}xn zz0{xcfORhIkrlBd<@g!qX~RM({o{{%it#h@E$J-DaiLC(KQ4Q>j{FH!bh6NySfe~% z%&euP;Z>RQ=N~;w`jVHPfr&`Pac~;?a%hR)=g<5WxFs2T*B>>OS%f*mChpB9XF92Jg27LzX(OCJ}XTGsptnS2+T@|uC zwY*M}PHL%;;&P2M@v;VW`09h02V-1wK5Px$(0QsIa{{l=EiY?ON2KK3z5-w#h;r=8QTvdMrV*U`NH4uJ|pMqpjLRDTabhh0Z#MyosLwnzb zuB6OHXhn7V6#X%YrKu0>ZbY+#<2FKL^y*WJ6;gds-O$jDgwE%=T}D-r+UONdsfXKJ zw99ZKbe7CkPL$rdMk9v=K7u&ScL4F+Ob(BV3e<~u(-4p|Bk$h(T~fBWqNomxO>o=Z z-M4qtrLR8M^EK(mM&KmK9wYK<-D3t0uFkStzb)=Suvolv%m zd`hBLA&33aqTUI0{@g#kw=P;Ga>k#e2UsRB<#VOp5V_NG zZr3RV2 zZ?{h^%#}b*FG^nyghGe!cLRr?(GWjRKgkxn6Y`E#6Mwe~e%0$2`*T}P+-0TSqZ;Ue zNGY^u^WkKFP7bZ>N8fSMpZm+)w3TfFb%vLZcgtrkbS6WVfohHKFAKD#GuAjLurO>VlSe=PqwA@A~1*Ol#8u+mr zjO{Thv2y&)KQ4Q>^ykFPP}X53US`nxb5vqt4wsgf8MOYK8LU+`_Hmg(>(3FnpQam$ zml>4)+{aR&v`aT$v>ebeQB*7Oi!o+jp$^nrWP0>d#REl|_CgUS`nx zbC1T#&#(07)Gj3CWo>zxL20>(cXW=nxf{z2T7T|7ul)Q9|iT@!->`2W1U)rL+q>bp$P5^>@{t`b+G2^F6e@2&JzDi~5-Aa3_+L-ibOiddh zzky7ztxX|dlm3haY$UY)97HE+Gofw$5DVByxarRyJsRR?_juBufjA78HMU3T^%TE8 z{G{lk{ceobJ^A^ZMsV!Nq(3usu+8 zs{H$t{>;#gmsx*q43$!9*Jy0|Gc(B$d4!@rpQ2tWp8zkq(FB|Rj9oC_wfL$y?M;7% zk@}+HPObBs_cwyvcD>UznhZ(y^l}fk+wzdze(qF^MDmRPf54<3BGT0@|Nelfc}ju@ zh{xOVxtA*|r9SoPxS|xR^+|tL`qaLxwJ)WXP}`IKOhvYhkT>rQdQ?po*+KVPPx|x0 z`tx^mQ8J(IufwE2JJG!xX(OdZh(ZQUZUcstanR?zSh zX$uFnm7%LoJ>BMK-~BM@&kWsmSt^`U^sxvh{h6WL2)XThD}hRRG3n2T7y5I15QtdH z>o@(mbkQ0(8Ot#UfPTKvQ~Bx!`!f)p*hOO~Cn|*u(c)I&d1g(QFB1z*v}Sih|I+45 z`*XQSTn23)XAv)wU;1+ZR}@`Ui#=Z=UiNP5&!tdNMRz6k{Oe0zx|k^rCh%51+OcI# z>G+vh3s%j??De|*t$ 
zs&(F0{Br@4mX=SY5)-eswxod{i5c6sI^t0$dnI0G(E4+!#JBP*k1Z`TX#Kfo(EL0S zFEeQUIejj&k$9Ow>CcgMH@hdN2U=#(`g5s0q}*5^@I}ipv$#^kdFAJq`g2C3gy_)cSNd~`$P*N~44o3=j(xWNT)UO3EDk@4jHhqi zlus}$_PT5n6aKlNq@@H(@9xVIgP@eRejW7N-eNi6`?4PA!ap-}10mP%pF|M1&ds~R z@Dq4G=~mPcIfn+q?Lng>QH;eS2O&>A5Y7)8Ml$|ci*g{G9#q>~iWN~6z}-+M zw13vPh)?s*cSxd_+lGH`Q!E#G(0sYa2b3TSO@Nt{%xf*{sAforJ{+AX^u+)QZR5NsC} zDWagP@l?&EWk|g~^bQ&9m8P%xWDr8a)NT|0c>*CEP{DIq@;Q-I!cEY|2trWYz|ej& zj#2MRcO#QgUE^C*I&J^N6r&dwi_Z9G(hp69#MCgIZ zc*S&2mY3nnpY-Q|J#K%(KPNKr_9y&v5M?(BqJ607yX9FAojx(5aF|Ac?; zz57r2=SR291kF#s@P;(=-czg+jeRb8trmFElGa@eI8uOQkWMi^18f;c2G!VB;)OS?KSzod{&{TaMa!vGax47v zNWAcd_2(W9%jc1JnL)8-aEvPBI6u%jN!Fjc&z8?a%dhn3K8V!@I}$H5D1Hd&5!txUez8sAXFGRe#PnD>W87 zZnxpfxPEWn`g4YE#y=}P&qxUW>asS5?D*$j^ydVWk#x#E{#}1geD_TShj{IM)c(AG zD*ZVC!rO>{{#k#{p{DWAzw6IE8e-r7)1Tu(pbW5k%%5NM=ica*`CcyrB*G6*`txT* zt}>;Hl-?BSOZ_>0E*WD6(nVtZx=3fgyhDL5t{d^sioRD6rT#kvWeff0Sw>{ShW;cy7XPBvHVv@X!Ba ze@;q`+n?~yNn1jyDYK}1ZI$84{!B;X^1D6bpO;Nj2lLisp7GC;4LzgWR(B4D>ZUr| z^N-qO2I2vdaeKx;OGJ&tXa&ieb$iA?i*O*8#+cx&+cW-IVrL|#(}_>HJ>#Djtw0)M zP|CMw{PQ{*h_>JuZ%_E=k|T6QFS_9KRHWz={`r+&*AnRo|Lh~t@PN|m5-v(5R^4v= zanTC2g|xM&cD9jN96o-AqLq^;{PQd8Wm}1#@XxO-u0bi^VrNmud12(QGCZD}p93Ns(XSgZqN8< z-D+rA3#XllEB<*-f)N{G?^YxJnQDJ(MgOC@g8TVX(pcUa|LlDJ10fyRKP4mAt?$14>NjNVMNt zN5uV;W;ts{(aMLNig4} zn{}6NmP#yEr@U7aNGw*X5?l4|d`kX}+lYVur$cxM=v}>&^{3VN9sjI4vQ6Oe&^`N{ zk#6v~er*P75=r$%l1^$rVFkCsKfCTy-4=(ThBmL#rrXXx;-AS}HW6+iajic${#odd zvBl9y+gVF*#y?M{>Dd}hS|bcp(HI|=&)r2OZyws`xaUu;1n&4}a*L0I*txdLf^@nS z{+WCu`%iG;)PE|uMt@%M&!{-#WgKy7zYz-~{+ZD1iE!&y5R$jXKT}m>A{@=uWFyaX zi=U|&wLJ`Uu5xL9_jtnT3 z1o75q$>*^p`!PmhrhMzO<@5OC!W%*qg4|7=vkcrm=uFcSA->V^c#NQ@ZnmGXpt zMmo*J8m?Ge|Ac=|NZ{>H_~%x(|Ac>TW&2O~=T^4=gnw>jn>xtz)4#Gmx3c{w{BtYY zsaB`vdBF#-P7|$ctBUqC2`~9M%E=S{*~p+_x{Wo66n(-!n{FUBKgTZm`6qsE-dOqB zuE?Qf=}ieXq*B}Rfc7qq#1J&Z$PJJD`|1oEc<}8B|NQC<+gibbf3^*nT_j2xwe7X1 zw0b$ur`qVDJ;GZF+vjd^ks&2n^B;%jUVAwyE`m=pv0Vi|tZNfj(zE!G? 
z(7M=rmo>P3iJz_T9419OhQzuoNd<4lKhq-fq#tq_qd$E5PPEQVW!K#HMmT@+(6w)3 zz(4;ybnPDu{u#newWjIyL1PIt{<&1tts@Amp~p0mN@lkW|J<#)T@RVx`NGy@f`5jz z_0IvlP{wxFadaf(pCR?m;kNjb0rW`dyOa>Cvtu!kNzFWdXI}9;>gU!5G~l0UKUyvC znAo(>MeQQl@XrvK)7;i_QBv{Tf`5kemj`TgmhsP!?hiQXcA6X>v(JMA{+UZg-A-Hx zpR330+u`m&b%8~z!>O~-df z29-0xEH~qy*FLV@YFW47pSf&2dZb$kp}KuD7yZ(7kB_!>Yn$%c>1lE33z9{}cW>A%PAqp1Yx0>PhE;WUNj|;Qded z=Y#~RR^QyxOZ~a~yy_e4P--VkNZ|cX_~-ER{ZIJkAf4{g(x<1Fycbe-2`0BpyfOm7PB!fn@eyCgqi#Kfzh|XZ$lfyX#(sca1CRKI5M!wQ@j1 zmyOX?R^1VGXK%60o~`LcalIG*dD6&dG>4S%095V^{<-v|Zm|?B^yf0c-3R>hqLIUz z-L2H)x(EOKt2T;7$a>xV3I8mwN4E$l!RlLJe)kFgT&B(L@nAddq0@n#*FoNmp~a!c z@(iRKV&BO|eh{8%zf6G@Ia}Qa{If(z>7SAhd1sw8Fzow=f7UV&fxDotX@Tmy#L?fi z&FO33TJP!*;&+>izoXf{CA5R zjnJm;#uY_zydoV^L-U0SgbtS_ng87z=xZReTQTI}7~WX$&zRYV78~-g5%XmAqIBOx zxb>J`mCNt0CBi2jb7&Fc{rKrEhxEI)=815!79W5saaqpYcN-l<&x@KPV~e}BICT*2 zVzMHfdJLFo=t!?Y9r@%+8GZsAG<@>RHrJV--S#i|jf}ImYVtpStGFT=ILKot@YA3_ zV`xquSBh7vRSfr2{tfFD`k`LIZ&1djztO)W#a!&qPIVne#(l*<9~b@E>8=9=y07@> z;}So3lzdA}&3(l`9~b@kDE#v+LuZTIHm>UPyu-S#N6I$dSN!wQQl}?H7i-DZq2|8g zpKAdR#F7;W_`OS1?2}DPi~dZ7KUCuGbMdpT;3Z=HyR_Vbam7EsY&nHX?koQJsI6>j zS&JygCFvgXkF)-K^z!e~5I;{X%{tOimZgCI@AB-;#Iufc)FO?WGDT>|r(b4J>n?bt zyIgj2%j(y7F}LmoYc#|3kwjBJ%eZ@Z?&EUgyTYv5uT={v)zi|{>2ETD%@z1r14lRq*def}@&*P8t z3Fs*134X(Xg=tb2{h1nWn|FzdxyuYL`tz|Aw$i><{Bu>*ZriIIxo8W$caQjI#FT!D z5;$NN0@0_ky1Kz@3fps5r2Vp_+`k+DJi(3~k+KuY^3bZlUllT z!3qz#iblc_4Y7^01Q3R9t4{rBvg!;SImHZqU-8eu7W8GM(kA=aeZ)Vr!;USYBc%Q0 z+`BjaS?=ZG)}>~4`z;uC`M=WY`lnI{`;!&TXy!ZqIZy^Yt|U#3P$}NqXjnc&;4RGZ zSG!7)mS0u1y;^sert zGhhtt_Z|QIPj8DrIOlp-$d<%g-5dYR#eUSkD|$|KpH9k zbRY50z$qre&8*xSW8DRjn-n4r3DRsU-8e3>3A93K>yp+IpUvDoJPVO-_o|+ z#y|g?LN9SGkC&aA>%99;V=$kAG~vC`7r$t7pYadz?BkWBoa@ z?iv5Q%%F~kL-z^)ER|PewbAS< zTR;f+4gXv^coXW7v|pAo(RZJCH4*zyn(+OTMxzhu3;tPWiGeUma&TYw0slN%b0=*& z7^-{l&$IqK>!#m*n`wSw(F7d*J_bsPS<^nE>)j$4^Kt(HCB?`SdY|7(xfjDMC|QsdkG_i4G+o*n<}gT6_9LauiO^=@}? 
z=YM`GP)Jv7_0~m!H{1*Vycbwe{*C?Rx}6vnvIa-|vx|6cl*+%6R~v8||E&BQ`^y&o z?c*WI4eo`1hJm)%TtCG~;3RCnFZkzKe{S1upC$(U^KR(XT}J&mrFHLxe^&jJ{q(A%6)2D@vbaj+%49zI?{eXpkW}Qs=H?)5buK0M{TctP2^xuI z9*OvR#y^X2AeOqBs+ad?{4*kJB4!SH!9L-iU+HyyEPc-ZY|f%}y|{~9YyYCSKH;Ca z>(mYDbqNW)KjnXZg}rRU!c+d|SMh@zZ&+v!L-k6pYjakus*zYLZ~V-p^h$ZcKfltx zk~r@EgnwTADaP~nHt~diUi>M}XPz*|`xE|o@u%2wxAoZqXn3IIJZS6P)@RG7k$7c( z@pF{GC;ao`Pw{y$$?on?_-AqGILj2Uo!WYvc)~xwI<+Oo z!u<*VY$xY1I`Vq_>pq*4#vip|jGtNUNk)2q%K!Xo4JM%M{wMr%LyWXMr`x_-gYEDB z6aG2HDeh1B=eFNk-$?$te-U33{&_%b&@~N7schddS#0>{8L<%!rj*P>kB0HjGh#y~ zo=pin2;kAozu=#>s0PC5&#eRw_~*$GF8BC;S-39v=gAQMl)tXcS^z54X)~=hcg1#Whr}c-|){m*bXgD585nh{By^})vQfN57&@P(S-YefBrdt-6f`> z4gdUG{<>6+zX$)kS)C8B@^AD(tYkTVtH076@(w=AySKLXO!()`K<$jnMJhkfF^cPm zfA(-`-4t-g@7MapUVU)rDJNq-1X zGqy|yuH`cmk?^|5pYYEKuY3Fn{~R-m%LYu{SccN)>Ez#dh#_WTNw9sKt;s~$e*6jl z9A*3QC;W4i?Z=<+&r!A?f5JaU*^b3xe4s^R6n-P(>+vW2bN24XpYYEyXFdLef6kur zs3ke{`IU~8kibVB;78(R25o;XmH6=|{But9k2>*>EzfJv_UD8IKFT0462Ca7BqZ=r zCW4W8UV~+S?mp|cktJW+pG)m|{0aY@V3x<9@;@hV@9`)6bBnJ(;h$42_wgtEb20+i zs5Xtpg5Pk)h!$Ue!apZM|FPnq<+_!I(MD7z0W5JQWyC+@svHS-`*S>E4<9TwiND&P z3qbfV{`vpx&;KHyy(djE8_^s&X+C^pMO@E>qoVx}`*UF- z9xML&HGZRqPW-%$s#UY?lQlAZim&a_`JaEaKPOqo!{EYFThrnz`*V-8&V$wuoyk1! 
z=v&_T@R2a{hj%i~;BWTl7|I@nfByHo{kd0)H>dx#KWBB@65Ku9K+Tw^^PTO_!JIsH z{PS=2=W>8P8vp#O{W-brABCs;ANz9!>@XwPGb+tc->k0r_ z8e6xh&24)bzfs5k$Bcho@Ee=`xsFQ@+YPFjU+m9eq{2V{pZ&RKaO^I3cxiv$#(8*} zpY6}xvh5rHV}FhV^ikg5u`jRe&snfGA$l~P@f*xy^yiF*Kbpv$2n_^ydhf75}Ud-T6-3I*^RVihpJiEX0gJLINLhI?pY=XgMK) zj}`yCo)4WT@I9+rd(YJJD?5K2ppO;*Od-_eVp#^dW|Gl#F$0PPw@y|xjjx#DA zPDtQm#XrByzY$2rW5qweZ24c0hGpU~-LE_d2?>0x_~(CS@bO}1Fd=~tZP)WSFEc2% z3=rtC;-8;_mW|)&KCk@z3ct|@F)gC08!z?eGWT=u%hihoWn-e_X6 zKevuF;-3#c+Mi>xX#Df9`Ja27|CIl^PZKl#`G@?^tpx7)=U?(a2Tu8r;>n3Bepx>yZuiHCfBrH5^Py)jd;G8YpPTuOfBrxFbA0rV9sfM!e->L>e?G`^M0j1a zf6S8gswfpE0o~6youx{LcYSJXZX(BLud|VV-2B z=ws=Me}2|-S$HjxR{Zm`{){kak^7wgc~N_G)^1H^#Xs98KlWK_QR~mo_-Ado&3$@6 zgkk(Ck0<=IEe#8CAH<&U&##DE?RmyOtJR@p9iIbfemvu!rH+lnT0rHreLUfxUuj=$ zCH@osInnNqr~J>aJn2d7`FP6z>=W?VXX(#T<9+g&9!NXVLL7hh;~D?_f;jH8$Di=e zebW3B{<-&*KjEJPoOt{R|J;o9C;W4&1U~+Re-3cs@r-}g&Nz&-_VswBAJ6#b7sRQ) z@%R(|xz(OO;h&R->hUN1b0V4_Px+sH<{KYS`g2s`KjEK~o8|GG|9SBs=D(A5;G4!uxhdDx6cFwxN>%(rdPu&-m)r>kPguu=UpB z)MM@SsV4In@XvB#WlB6`x}Tg+l*@_a-|)|JGljJHL8LKSw^k7R^Ok=jNv_qZ>=AW& z8*u!5TmB6n(TkwY0lMgPO%(QH!atWY!vyI7qr_g`!hFX+C%{#to2i=Y-Req8%s(3c ztbD}jVR-bxQ8!#B$U~w?9t-{%OTkDO$iqflyQUs(!avN>gfaYf{^wn=a2W@&q5U(* z&{8j-^FL4VeFe*Ijd9NZJoR`ti??9h^FI$Uj?CROBCXdM{|tu@eVpPG`5xYgF#i+& z`Iwa=Wsz`Tb4I-{B5!`TlwQ<}h%=B7o2H*jWW0g@3;M`@Yiw1)$vN z04ODHPda|-z&O}aRqYY{^T!dD<^q?0sYCxYb}l=T>(~|jl^Qo-cfKk!t7ln!42=eC zc-}zw%3H?;o!tPhUKLnZj{wRzAGlky&P5Ah&E>%KY_s{zr`;5?sSSmp!1827AuP8kF zEIRX&fQmki53_>uH}QVV0mvCr5>QzShXtos6dsL$oFOFvmF2_n`Rz5{#9|ydLrMZF z`Zzue387cKkN?suC{OG4L_Owu8l`8!aVomq8i3}<@nO`no|||d(k&;CB>@!^Qi z=NM_Bvq}OgYvJ|0;>hDq)A+|zpn~!L_;6RxfssIGl>}7ug($p!JKm3a2L9htuxIh{ z;b^V5SG*q=%bI3MKt&(Nhr4=?bss|GO9Cou;q}n*<&Yim5>QzS@9KGK8J@wlqGoNZ z*B4b8raFl|O9Cp(k<&!y7g0D)EoDzhKt+Y)|KmtEIUL>h7{%!M?*NtfvrEBod-5IX zt89QuG>WC*IQ82*@q5aa(Q$qw%nyUv_{>N>zO=73xRFkSq(J5_f(% z;xJJoYGg}9O1M{JU#ngu(Rts#7QOXHXYyEP0EUhc9rsdjt2*-Ym+1sW z9egwQne~GkU*_PnoBP~VU5q2_vR!To`bM*jlbAI<>t+5l>x4zyvH>bN7n}vg7xGK- 
zV8>5t`~WIB|BIsAUWsFcO<#IX#3+T##%fPZkVlPnz)$Z)WRyEVC2l7L=0(O|iU9~a zE4JKFft=>e0^=dgFU1HmHYiW2Rs^V=PiDAOmd^?UYIH++tZ}JP3B1O@5btt{q^y8S zOm)|STXK-!X;v2jDv|GQEqIr%1F>D^(0pgvk85AQ7V`>X@yZTR8I5o`{%B?M9ed0T zZK!M6=M7olX^`fUfJ)E=>k;pC`RmZUsTgC>zv+wd;~0LIuZ{6`ykptD+=+lO^sWWB zs$)x(q6B?0?5+jxs*VIuWh#a-tJvS)N zc6ev76E{%=sJz?(D$#D2m%6VzEfQVs>{fLJ@qRop>KH9Rg>77l1*qW36bn#c8<%1M zDlpq6B+#vG#0AATxL)9R4q>*71*pJmm(WXhN1FF!IdVMy>T)eW1!lWgfQkl93s7Mx zs#t&uZhWx-6`1W}0V=GPW;pD66V5OanC&t(-d*9{9gL?2&%9WG3e0wyHt5xnVYZ6} zsHkJK02P?+VgV{J+rciyxfY;;8B+EM`cBao)-;O+sIcT$EI@^Y z=3)UVEcumvg1*xP*SMD%vAs5=Ge8AqyI6qAJM->6pfc6@P1}X`8ft-IVg*!U_A)+_ zB)l2ynjH&JnWud#cyF*nl$8Wjrq0q4j|Mv)W4Q-Zz7d(4P22z#cHGnP?+kWbVCv_` z2D>H*dq9QcaXNk)?AuL=dDr&Q-5la(uy0j2^~U(ZBZGabx|pP08SEM~Z-7dOwYat8 z74vI;n(9=uV*x55&{QDXLwpJO&H7*=LEmG8{Uu%EOezrFYW~?6aQSJI$8Vp%n8U7s z3ef^_*3;md=O0hBT(ujZ^2lIk@+d&%p}|g4?m~jT#|FE^_6n$cYp~-I7Xc~{4R#jv zOD5=hAX~#=hm$GyfJ&fvk`GaVlEtXp8=&&QU?)veNkAp0#>)}+2D?nBJ)m;B9ikND z3aEU$9ilo$0V;P%P=-TrC*(hpL@g6g84YYXuJOkHSvaW1XQAn zS__Y9y87oMpt4h6(|_iHJpd}B0`86+b8Suk4}i+4e%3k1^{nT3c4Vy}908S>{VW$a zC+0eGT>RaKyE_gQay zDi)x^ideA#6`1W}0V*)t#R61-mx=|bz-*83uifoF1H56j%Mnls0kSltIR~VFJ_0Jc zXE3I9>Ypt@1!lWgfC?;eu>ci~gBGAd^2TBTDlpq4&D*Y?&j4>t#OA}j*6XQ$MiL(Z zm0d3yf{iTA#R611wxI>6G_#{_*l^%eU0jRnwSg1%_5i{6;} zS?4_4^ZMb5%GHein4s_E$ZV}!jtTlsj{L*}dq~hX7qZsV3jNGzbx6>6Muxy_mqUWS zGr(IT>|ulQxq`Hw#<+t7&*oX&)$8sKPW_A|J|yTn3y{==_mH6P%-NPFb4bv4c4V0C za!AlO7loD!jCCKF?P6_E4zpdX4a#A*%OOGEnSR#N>LEejX&Mlw|Az#9XJL%$pAQN8 z&W;STT@DHQ&W;STT@DHQ<^s!F&*SOVIC$8g{0#7h*)E3!edEEnd%9z#L#ERqLEou< zPMk6yl(jvd>Sxr~Awl0Vt4*C zolGX^yYQd2K6sy??;HR5CtdC>LEi)anGmXy3HqKc{AZnW>W$l_#d6c~ZruIxPB+ET z&s)`5`dOzDUE7uatU>dZpzo3Y%y6s(eGmO-nR}U_@9DAsECXlG9WV7We%+d&?|c85 zdCDz8-vj@-F~TZA-$Vadc2*|nV=JEdVBfj933!vVs;mk6zW1LQfRv!`vH#2nI}`Lh z!X~EwOuuoTpzk~XS%oeAtaHvO{b=LrSfUl)VHrPPK4BsD3Hna`XDP-tLErcOv(9<+ z)$_F`;Xe4!I_FSkqwibJ_~<`tnbO*z{3V~1Jc&#FoGvl0|HTI7zp_2obN*RZApbd^ zlp1`;?egNoGyR;6zaA1WO#XR2a_HymWNLz$>xXChIh$zRC+JK5dHwRJF#6|eCFmn* 
zRkafI5#m~te_sCVssF5Vp6z)pysOGk=4|6qtpt6of%8d4d*5Tli%oc=J)*bXv<5%(US8Wo1E5- ze~yFS3hwkX-Ph!wuL2}lAFMn6`5Y4d$uP0-&ky`(U0^h_Q3a#<+|GC(Ac(4Qs!UcG)5a6wLF_l{B+K5puXxI{~R4k9(BiWOKE_M_b%BD94kj8~^+$K$3kP>*Sv=^)tQbj(`4!-%#P%Kga!e zz;CeRSEqY5TJmk+9A7)=XB6Xse~u)A^AF8`GC)#_@s59162UBd3y@UDxbe@A^fL}s zo#XnYe%2kFZ7BZkBYuM?We(Ms`Wdlp@z1|#&ldmumGhKp@z3NJtrq{xl3%s>XEo93 zxNSFhJnLr(`qV#L{4)$ywfJYEu&c#CGo!E5aa+%Mc2R1i8L72Np-BsP0OH3xQYVptNpDq5GwYF;U z&uq3*E&f^Kpv6A}0#%EDW*4ez@z0uw#T4eQo=^2NlGx&(*@dcF{4MH7w5Y{D z6R=P%{+ac`YVps6oK%Z{CV;(Wg1!s=EU&=gpWk&;Ed8vibM*68b(Vgn%eDAtBFC%6 zKZBoDi+`4ZGenG`pHW{H|IF%twfJWM!kI)Z&g8NGOx0QZGocXG;-BToSo|~bR@LI4 zi6W^M|GX8p^fSYR#XqwNb+!0sDMpKbR>x@Z&vK|N{+VUUnv=9>91rv}kpj64d#Rrh z+ZO-KZUxoipUH$!E&iF1z-sZ&EcsQ7e^wK<_-FOc7XJ)GRW1IRkics3&mbA~n4m9j zef&GRs?^Vf1XhcG2J@?j1bt`xhGz5@|4c|=wfJWZ6Bhscg};$K!8&_5)6WQ3i+|RH z*W#b$$yod|A%WH6p9u-97XM60V72&XLISJBKNAvIE&f^kv&BDyWK@fPX05GS{4*he z)#9Hu4qE&(A%WH6pJh5({Ie!vA-=BlGwRFYpMTOaB?a&f=e0{jV1P3_w^d{#kaG#Xl1YQ7!&io{Ysm6K_>5{+TF}YVproVM{+V zXj=R;d8n$zKT9!M{Ifbni+`3wW%18irnLBHA_Wo&ywcC?O<67e`IC9K#Xpk+r&|0o zA%WH6pS9#?@y}|a7XPgN+2WsJsH(+3lZLt;Hz+@Gi8Z6Q_-9hHR*QcoB(Pfivu5-b z|4c|=wfJX^uonMJn0ZpGtCxGqZ{X!x{Ie#!7XK_y#^Rp|39J_XOh{n0_-8@_tHnPP z5?C$%nfUH%@z3g?E&f?ZhQ&V<5?C$%nUKJ0@y{9uE&f>`ki|dCbh7woO~hhB5Uv*gEOXD|pJm|8wed^+j9+K*&szPr_-6rx z7XK_e%i^C2g{T()EKkPbpNY4s7XM5XNwxUrt+1t^88mZ)@(X^0O{lBIKT9!M{Ifbn zZBYI&|Ari@xk33AzoBJHi+_I3CndY*OZ}YdDZo4z{4=?fNABy{_4Cj0LC^Bnk=rnG zUEdEAXZ$lEfg?ZmzVMEa3_gt|zh?2zG|^`9&-BmD;-6utn#Dg864)&MnHhb{{Lic7 zx~rW@k!6?LEdCkHuUY&vGx}!n&x8avi+^T>9SPL;7Zj%4dckARA zn#Dh}*48ZknUKI{@z0Ed&ElT{fttlXV>&g9e`X?vNQ?2>;*eKm`JX7#^W{4)Syv-oH1tY-1g zghDinf5ww(7XM7VRkQeK5_>j_f8Gi^`k5uaX7SJLLe(t(S&Gr(pVcv1{4)+!v-oG0 zDO=3$^7V+{;7PIgXSQQ#7XSR2W=OO6=U>S`H;aEJB(PchGfRHW;-A$-E&f^kv&BCX z64)&MnIu2W;-8t(H;aEJB(PchGnikq_-AJH&ElU432YYstP$4Yp9wRcZq~iHK2fwA%V@}pPzCSuUYw@)jwPO zv-)R?e`c+%S^P60fz9HdH4a+*Ga-S^;-4{{n#DhBB4+W=gakH=e|}5YqgnhjDe0QU zKNlT&4l3gTh&k>p8t*=Skbs4;>p&D7bJL~ZczAB_LcOp)*ew2;kdtQd&%YV1EdE(u 
zfyF;>H^tJ=syatMZ&epv+ogWSIkEU>Hd|>H|BSiUEdH6CPIFD*T0i60S^P7r|IOl` z0SKFw|5@ zqs2eVp|bdAmMQ1nhvUH-uLt^>NP%3oy7ZqB+ZO-)k?Vua;-ATs-You^kicf~&n)>h zi+@%VwfJZC&ldj-L)9$)nUKI{@z0vkTl_O2fz9Hd!Tg%VKWj#B@y~<=Hj97OFk$h} zzpz=dCs@ttGyRMRwD@ODcrE@}o{Ysm|I8J!X7SI21U8F*CM2*~{4*he&ElU432YYs ztp3^JpFuL3#Xqyw)-3*+kicf~&l(3U{+W=#X7SH5oh<%Y6ETZ_CM2*~{PP=%(c+)U ztJW<3nOMAL@y`S-G>d=M`k=)>6LQil{+Zo3n#DiME3o+I?WS1zSvSSf&-8T`|ExjN z;-5Rh)#9IJ?pge^44ip=SNw+5m&HG``rj=68Gx`^{Il#Vi+?5*qFMa2JQ<6BCf=%9 z{4-G`&ElW8!j^t!(6spH4-})tKT9!M{Ifbni+`3wW%18irnLBHA_XQw^>n45+1apJ z{4@J5HH&{{pPI3~cHiI~Md6B5`g{`nPu!{VRGtJW<3nOMAL@y`S-G>d=M`k=)>6LQil{+Zo3 zn#DiME3o+I?WQ>Td8;~0KVvCd{Idp4i+}Feh8F)UbI;T#)`b6U#Qh!^}b`!02he`cSWZt>5A1a^ymX34Kx{4-6oTl_Qq zbGP_s7^-gZ&x8bai+^TD-!1-`kic&7&tQJt;-8t(cZ+`}B(PijGb3!b_-De*>2WXo zXPm%p@y|?nyTw1_$#jc5r8Q4}1>NGGx0_<=XWbM_KPUg(E&f@9 zro}(^v<+D_-8^Py2U@^$#jc< zCf=%B{4-G`-Qu6O!j67s$*)`d^E-;s;-94$E&f>@qs2etP<4xcW|^{E{447`E`qbRui@OXZ6n({|rObE&iF1z;5x+%;>wt zKNAwzE&dtIuUq^xGx~1v&x8bai+|P#Yw^#7nKS#p($9%e64)*NS>vF^KNAwz zE&dtPsayQBCSn%eMba()c`Iz`X9i7+e||?XTKuyVqs2d~ zW3>2ZIaC(^%rYfA9bNFxL<*$Mf2p6DmRkHX`!02hfBs~iV)4&}1a^ymX34Kx{Ii;< z#XqZmw)kfls&4Vmgamerf7XoN;-3i#>=yqF=GQI$Su=Wze@y~<=c8h-|B(PijGa-T9;-3i#>=yq_NMN`4XZ6n({|u7R zE&iFcwr=sygamerf7Uo?@y~<=c8h16TGnuvw?y7r$@Ul#xTiej|*XY#6bi+?5- zuUq^x0Sn#YpS3<{@y~>ubc=sx_l<7x&+-Z^{+T$4sVM{+VXj=U9JBrcbpQRWr{#hNP#XrlTviN5$Q(F8pkphXFJiq+<`dL1IzxL13 z_kI3)dv0&TLK`W&zUt4PBQQU=GwYLoew_h|FT-+)|9We4y>5)Y!>)X7qq|5tt55#< z<98oj)7$gcV3xn?YabJVx96A7vEDft=lAEApG8HW1rHim}x*+ie7`{bWrUS^h?ITnrY zg?0OU)z6<9_deCfB*YG|MABkKmOyNfBWx0{_Ai5^N$~&|N8X$ e<6nL}KmGan^Uo0?{^R-cFRxF3`8m3o|NDQx;ZBMG diff --git a/MPK25/Preset.syx b/MPK25/Preset.syx deleted file mode 100644 index f0246084dce4f54c1fd03d29f3c7eec72f20bb26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 682 zcmXxixlRKC3`J2pwrSV`NbGCaw0W}r9|S{EHkCKCN-2dH3=jlC^!&g7-~agk z|IhzktylH)e6Cji>wo_5^=f@OdwX6zZ|n1Fy}NmLIX(N`{kj_OZpUtX(fE10KW2l= 
zZoa!7#=HCdG#g(vzTQ^r?OCt)=dl}HFZXfnaXLMom+twd@o4aPt~4l6?wjj;ck}MF z@5XnH2dvM2?DM|ebkp?Y_SLl;>n{V`9vT3kY`f`ENA;x3rW*$_kNa^B@V1%#Z?{HL z&)l4Mqp?m`tL^M{vzoAO76fh%+f8HRywv`g5wS~&dvtbSu)&Ospg_y1l%5#0K{x_45LeFq&#%*0GdhP~~ z+kE$QwVK|anW{@K*h}cS0J!g)+8&QNgU|bY;J;CK2|b@z`kcK!LYM*IeqT5K?@j!d z(DP%JHBy}8U<^g9I0*P%Ts_v?9^fVPywhtgKW_^YxIdb#?)RIkJ;2a&qwW4^v^}VMNScnLjcfcN8e-gG<6X*r(@|EHe8OXwK@=WDNw$9XgR-^_KhnP7yTAJ4nK z0`>j+Fx1V#?T0J0)kyBEp6*NN`8Fr^@I>y_;IQtdkK<)?n_z^V!{dXC{iM6Y&b#!F z+Zy0~PIu9BK|T9_Y~j9ce{5FZhk#}>m(a5w?hk#{yJwi>c+z%xQsX)Fd_O(mqMDv; zBxAMQ){Vf&Wto)F^WbQpz8)q!IES?lsL$*5q`J^^_i8lb-Tl0At(rc<10UxB#nr(nGU ze~sjkEz`S+jnK0Jo-)~^L57-l_v5DDt$Gb!L(k6@>KudFhb{`kmm93--TkqfV1%B- z1gfvY4y<|i%yM7vQEcAbCK#b-{Am+hha3kNy7%qR{X_rPUW3=rbCLTk4F*4bP7*pk z5ZHRV>8@qlyTE)-gJ0QY=oYHI~wdm zm}z={K7dj@>SjxyFo&LJuePiK^*TFBf#+FnpFI~n2kP}60BP_z&E@nsJl(Xr?RtQ> z&~spYP4>P?^tpQUte$4$(DUPQ=r#Dn-A4bWU#~Tnn{5;SE%dyd4CQj@q26!VJnNaa z+vYfX4n2>{_kFhaF|1t!V83wtwK>wQ=s5$#-MX*ZD925MYEI4TZlUMkB8zA=zUIvW=((WYTiJ!z+rD^bfXZqK2@k$;3q1!HhdJ43uL0O(R`cz-m(?xwY@6t% z`CMCc4ZwK;YI6Qt==r!neV(jqyK_@-)3=hJSKIZi=-FlR`4Fiw$cr&=PG0ISj#__h zt@ZbiKP^PA!UNKHwBDTZDW7Y9_xF*HkAcnAg(%=l0CE3y@`h1GkIxbaxa^m*NE&Vyq1Ay7Cvo>rUNstZyAJZ& zM=n?$_xt_v^zh*O_PkqN4e*>#o89y9ygqiffsp+Tw5#pz`Fvg;SL@?sb^YN1S}zF1IVvy56_MPftCM&0~MFO{^Xl54X)>gS+F? 
z*E@3MF4vwUp~rPS2zP(GXq>zA*!udBClDqK^fJD=p9kUYZ$wwh!sulH5|AFxP5R&O z*O33XO_*5PM7C?7cAdy26v-6UT^nw(2RJ0BL=N5n+qxOHAq+_671IosPFUc znayMM*xxYR)$!@#Y7CNg@;urQRvXV*Z_sZov^{e1$T0HF<#t{_z-WEJ#O1j;uQz8} zujwANzh77DHNDXK9ADd*a)bZ3%q6Nj&z?iSk0avI_xW@=d$+51k7EQL*KGJhYTB<6 zFL+0T>#4rH18W1U>khzp`RO*n$cjjlo3FhdeSJT8_5POEyC<&4Ho9Nag;p*d>Wgb$ zv5&m!)$4^EyT0^BtZPmkSpdo*i3Dd7PdOUYzd56`QnqagxK!+bXhpLqMd zA+VN)>7>=kJta2e`RZ{USeO3SH=pjeu|eJ5o6O;PbI8|yr2)GR%>@ZAjU(|) zC_hKApV$cFJHzL14Sjc;tgGw+-x^LZkZhiHX)uauzM*b51MLf>XD`-G==l@F!N;rN z=6c_(dr~&l^%Dk6#r^TDRg(9Q_2Dt_LdMo`+q6ir zyA&O*kEhd`ly!>vWM$Q9dUuWJMcz3*#_)EyYPjX|P+uKLNWK~X91PvM-aO8SvGMON zOiio%&Q4k6xi>3tN&Y&mpGbB4Sf5yM#-?T?(l~ph)wd0NcB>N+@&lwcj_fW(lvTlp z^AvmoqzzyekalDKc9(<3XHX6JsPvTDcaY8*&7h3kCCvM@eZlt)r2XS?9R#|&L{j*M zvfT$ryI$M73pI&!`+~1Q!jrr^4v^Lpb(ey9pzO8-A8DQXhJOcX1E}Uly9*o2DoCrI zZm-EchP(E}Z^sJU*}RZvnV`lE$LnnLxNQ3Ubu6mgg|Vr&9{?Obr=qYAr03x+ypFe-_QJ)r&md2syOvdD+kDE>QV@7YMQ*RTu&@=dcjGXgnOpH^(@R=he}xrz;i0-K3)n>B~ zXW=u8fy2#5ib$*$6r(tAR)+}3es-#;zm z-D!M?N@7I$isgdfP{RIUw=b=pSD_ngU@}h|ip6flwPm62aGllm5U{s&&%^588q8i# zQB!xT%a{H#QM!ntJ+*;7-J3c|>t;^$gI=nMnb}0Nylpjp4A!>apENFU^L$%pa6}`p zhQw}%`T5vV>b?}o@nyu~x5oF?{M-|{-_r|Mqo@0Re!1IUAoBv5ltDu7W%e8kz&loB z^mz?+pz|?NN5u^4j^``q(;mco8_l*}ceFe%J!6=S?;2}#sIQOpdaBoRy;?}meH#Mz z1a1f$z22&CYfj&EOTC6LdDhwm#2BFQoYsNkA*J3guR*p$Y=4L`{m2OJrzhll;})0q zC}3&KXtN=Gy^uD@=(O6kA>$C;VSWDIvK5iv~FEC>i!T*3%ubV5R zG5FYPDbm28K-~w5c6*k7s?Zk0*nad`h)4WcOowLZ`;*35d}F;`aJp*7)bLt!z1EP* z=y?|dP&7bx2a573iXQ#REhxIRSPMnt+dkf)+ktIlb34!|w|unQ)#*l>T(j=3nxXHn z8Xru-?GWGi?Qq6~ZimN>l5?~NuWm=3G(%@bf{OLc%HOh1T(-xPed!+M)Dt065A=(k z6ed+`&D=%7-ER*-_e$TRA9}TK?Jt|brgnSIIWxFjT`pVpLEZ*UPn+ZZ6uGc4LdTpv zKLf_`%9f1vQcbb<{pGnmvU>@e@D38EJc<=&6B3M&9(@Rm1FYBcTIJmxVu;ulfj()M zo1o-d;|?F#9YW7(%sj{z$6EOYe_#%$zqcTG2tAJl*u!C;1J?b{P6cu2%Nllir5r-f zTOzy{h#K51_|;?=o{tyro-KhLLeJY+1wtIw#hK+!hm3oZY%kYmljtGz{KE1(KrdQ6 z^rRd@&kJET?}>zCGhoXJKft2H3XC-68b+*8nFyA41Q6 z4e&At7<&F|fKwSegr5Hz;9P@;(DRQ2+}c$PkgImzrs~}x^jrhn{XT;o)3G}gJ^wL- 
z`ySvi^!yrN<2ws0i;$RAOr~e>7OZ4s*lzkKEjwBt_mC6U9)lMndj^K`gntT+pGH+dM+0DojEQv*cRyBG4$L7 zxxgJbtrBK3T-vf|C(`a1dM2D(LtFp_7U&{Ed!7<#T%YzxXh0!F+o0;;C08bRl@%N&cIgWL3WxIW-^ zFxoeCPPrcgG_FEY`CdL~Vv@mq4lwi_??`Vv3vS=hId@p%YtaT0|HSP(I;T!}3_bsZ z+xJC0ZAoFttI4w7ar=SJrj1ka=ZTF|*`*4%@92E?4$J!i8$aN-NHyp@ZbV89dOn4o zf5PoMI;Sn^6ncJNZxgrg=$y>_6nY-Za^al|sa7YJ*rgT7Q|LLwKi8WLgg>lgZ*=^S9_S<}OjUoRxJizbJn=PJoe?q<)S zXKUl9RdU_ZWV2Gva|Vk)H*MT&0N+>2s&}hX@#nXd0;KG>am(zy$LLnO_S^4Hq37dj zA7m}t?>jArd^?JPO)03e{p#RWd}3V~fyA1wTLg*@OxT@6&jYi3s=*3M37hmj$gEr2 zzr<3lvB$z}p)5w(L=IeYW1OKR2=dUN=X2%+kzTv zs2vVOH;or&HTcr*j*Y%!`_X2&DB6om_u8v~RoEVT;{tOKG-Z}F2`v;8e`CW7o&mP`$^O{l$UGvkHkS?#+L4uu_&u5*#Gs-Ee;w$QWX{E(zsPh*BFarYZGE7*H8VdK zJujHHG9tDLlUjPWib~2|J$csy#%tpd%X4|~MCFF7nPu$g$resyK~OX9C;K|&?T?93 zD*D^7ZkV47UNxkRL)7}!>AZ`W$^!l5TaGWNobdbImg+%HJ`ClPMMdLxclGi5d>(nr zk@YR+9(I*ua}D~7#>hv=3Byi(9rjJz1HuIeTYG*5CY$?@CH8hGKAdP!nBF^X;q3G*OS4`1}nSCd`@eF2SzK6Ls{jC zZyF%{bSgz~}8<325n5Y08x0Df%cVr`ABVK>=wV^`sp_2@1 z*;95%{pRatzC!Ff^CbYRnwqbzkX@X9@Zy6Z)O-d3wnXi&(gX;;#nn4CVLPG?{4C3=!_N-uIOgiBtg+G@`j2 zEPMuEYb@B{!ybd!b1r0^?RO@xLXzI*>tMcOzUKg`K~Q5f2E~3%-agD`%$N0*HJ16> z4Y>AP>se$&NEucoIKW@n9qgzB)Z67J7_^a zVU7~|8cMuUbJnexUVAf`?3v;I#ypov&Hh7-dWM!6DD8IbeBPG=jCm$v0b8BhTXwX% zLRDf`8sg@DD8R?8;ds9DSkCm^SaROjm>x|o#|T(smjeF99e6)k&knvr8JK~cv`uoh zSdE7iZEqcXfCUqK!p##DNu)SD$Hp&3V~7;bA?3Y#dv(CH+LKxeFgU>~@(a9s3}nZM z8Mv9{5&*HDBz}y5rE$3x{bgxn@`^+@c6C?GdpiS40mhb3J0VO4%5?snb~|?0bn1DP zE$xl*OkOkh^KGw(Qh;|OuyJ8Lrmko=kzk>_>Fp>5IB;5G*5J?k!*OCG0+yeJ@$3#? 
zX;+xnY+M4Cp*J(C4LMA1M@lvM#7`2$V9T6OdJavTo+qNzY1zTqdhdjd8`T1_7jF>& zsd0AldrKKl1Hv)ZTy0jTDA{@+7wGC@!|Bhx(>@cLd0RgcYr45#6Y$+PikO`@mXyJ^ z`-~&(qnEcED^p-a4v|$9h82Q^TqO9!_{K_SYh(;3M_kASS7z?oDr4sqhK4E`cDIrz z#^ODcPlR-Ruin};)lEf8rRi}7upWj_Bu+KHtVs;p3w!XDJvafp;a$D&l~1Ih<6vbV zwe$fX6i%;lPPRsHYEEB15qD2Z)_|3QHUJRp=gYbMhQi-QUcDlw(bO{Wr7fnBUp|qE zq-ts_9!)uwGBfMj_MXOe^zw<6frnT|!fw`3Ot1Jx@EUoGw{ZI?Ior-+LE0dCqus+a ze&0LB51YiHVnCZ@#erq-Ec&j6hJTpa1-G1OTNw>wa}e2y9eQvh`itN^JWu!bxor|N z-2yG2VfbEFVcmK-?haHGblkTH}qp~JJ>-t zHkofAn6KBT7L}+!8m}l!gRC1`K8`UoP*E&e6J@a0_&c%|{O~hG8W^afkQ-nNcT!!R z7Ofrrbl-{9=3qS~4z#^b3E2Vc{&AE-oO^!&bAaGvIO?dZrIhks-{NhN{`Am+E*@ht;t=rSTiO zY@)nzTww_r&29J7%o@$JvU>^9YaDR@#87gjI+5nKm#EWB^ee$$xWbr8mZv7qqwVI5$w-@Xet9n8kpqA{`!`4Vm! zy|TN>%%Ww+gD2E|=ix;dc3F)DSKInTFW%pk@$7sjNj6lQ-VH9<6f-vQ8yNItkAUBE zBtTPWu0%1+@|sQ2&Pc_NdOKwjVy(+h(A_LD9$rHrZYQ9GU`8TI#A&C2h!-dc-?E9d z{xYm^ZjfQQd$@sta<|CnmuMxQdee7RslCD+tfdb8D83NAS@?m|p`)aIjcYwF;!9)& zAYB3C+q~cQy0k(v%VqT14tM5Likt8@uuax^Y@U5Bt>kx2s03yC^zRRV{Hm;>mBSGEgKbxiouQ%qNl(R+ek~?Cr*$uVzPT&qh*u)8vWB~ zQmpgB+SFudnTLdMiyL-xj|4M>efw^BmZ&g^)kDZX>=Mfg3*B#`X1!Et%ZiP>D zZsUPW;T!oQj3*$rc0D7uEN%GY%ju1Wj72H1S!3Uf8;bI%c{03eBMDGqVJ9}&D@6=V z%KAagvp4Ko>Pk33t9jeovMKZVWrLE=p8zfvIbfIv;Z(>zVl7Mdx*r!$>0ij-cZ39w zxyCSv9!x<(^~0z$87Q@kj6p_^+6Los%lz14Tp;Q^LdtVWCW^KRKFR!GM$7!jKZ((D znUW6HiIq6x8a?&=#74hh*p|G$PZTBohJ2s>npX8bKd&1)x9$-8_TUVepT*QQXlqW% zYR**RZ+@PiLceLn{eq1|k0t)5g{Cj_GpD5YzQ}5s5_%n3rDT>X_Ikly&)6$GM*(cc zcaRfS>!H-%;tcE@rE?x%oM9T&uF}LK>C!NZGqA>ycyO^u1K|l_0}B|tfPp83nuxh< zrj;fg_1b@0iExn;{IWZ!TtJu&X9#t57RiCH^^>{?)XSo-(3G0&u}s96a0&F1CV+e_ zX)$5vtm3%}-Y#T9E( zP@+TDY~KMRZjeIE7hH7X5&#gkfXXxm>=PG!{Dg}RDKe_4g>Q_$RI6)-)eJxND|#yx z3LMss=Fz0D+lKQpRwGLiWepF(r{_W%?KTp8wEqNSUfb$Ysf28o(cf^g=7BG&Yqx3FHCKhN~ulL95y&R#3hIyC3AbkW7u@WhFD_7NRR)X*Ft`!g^|phOG;mLM>Fa}m(ZBM5{O-g4DXxrEkv|NRwsf)ygcGBcX^ZNB6jFr7?~W!o`DTmi^_=|ElAjl>ehPDAgKd`s zAtpToE?6$D_Xl4^R|R4{);M{zVT{$!%NAA4GS{Y(B9h#vFT&fKFR3JwPE=;%3m8AE 
z+Tk~)Nq1}-Axq=H`openJ_m49oo5kC2eEhe0Ato32Cy<~hp=i{>5$G4+W2AsaFk|m^Zrekt*6kVx@X=%JUfkT+kMpX*EYY z$LRT2wB^>$2|FCg{D7V$^qua4sa8e~75x7%v;|WY;{|f5;g-xVXq$GGL<4+KTLguL z;vO~%u7s|R!R@9ob2bidNntK@Vy~Ft%M!j`x2dGc6of~<96R65bnO@1C-+v^L@76% z-LUbE2VH1fV;@o1$;>_Moln~37lm?C_q2nP3N(H#Q42DLs84(Nj2Vx)UWip;R^pdXhN zOm%Y5J}LNEz=AgV#H75J^?vSsDqWfG?Z2(}6{Eopto~JR?xp5*MK8J!@Uli1np%Xh z^2aAYLMYc*k~`Lr4hNK8yZ8e?$>u(_faJTF$F~@XK{j^%DOcUwib)^Vc#_8YrC+GR zBJahoeR$pBt%u4;ug1$B6peq*|3Fx$h#e~3_v`$Borb071aB0pTOh1k-+Xxls5Vx4 zB4TXEo?Yd~K#0oCBKp|;gN?D3%pI5yr02xg)ws!AOW~;Z59VH`riaoGV^uLY2Kh{# zB(ltTTqYtyF_h#UjSjX$VBTNZszUqpkKg8rsMO}z1k(I4PoLo{?GxW$mC$*E_Bw2V zi@hj7sjD`uZu={N-Vnybj(HhSKh&O}^+O+9QM(r;+i3h|Jo-U}+|QB{SM{iRaX*$N z_Yye(VXx_smHkyB*4qPL&$x)pS)S6O9Gq$#m*d!pr*;4J6x`t2sG|d#*5WW8i6<_% zExV9X1U-Wi74&T1{uj{Dj@2yiJ8tK9$(L( zV}{DN9}#6rSf%XUcCG4xh`w;Wds`vNIj^=-o%%Pm1M};zXh7b;B1fS)Y5ex(Xwt8T zx!eJ)QMjlM4mjq)x9JTN7_BF z+ktuts{RI@n-Mc zKd-(J#m8@T(`7GgGim&5&jY}jU=h>VpD@o@{;JIKd(Q*F85ciPmOo&=_oO6<@lTlB zw?d8n4={JE#;zRAf5ANSyQ?zGUwCfsZOV)O3udI`cNS!*iW=k6kFR68dkrQ)`F*-y zNF48KZf64fP+MNp{pB5>Hy+t>K;jB?yxnYxTFeQD?QwhBDmKD|!8pdoz$qd$sVAj~ zj(bO{yExsmcybjCb_&a}3SglFsjVL}yawhk5C8gk!knf2uf~PCZRZYFWtJb2R?WtA z&8!z&ho7$qdwnhdXB=@HF8=^ypzbtw)3Or%iMpA>Se04+Lfy=47(D9_)XkOB(GXuT zyL=v0H&IvG&Op;ouZ5QmuG%R`{5R@47BdBH|3=+i!Gxz+vaUr#pD@Kkq z{sYgmxPdy3eqO8j>$^hgO!-o?*v}t$o>gB`_u@}HFJOe8f5vgHj%Uv7q8NYv0mf)N zYv5G<$=_if-^?{wtnMF;3#l{TaY!eB;CTvx@x1(r=Vyz=V_p4$=T28BAK9OHUcmTI z^xR^6Z0&#Id9J}IyniWcVJd7l}iqQF!o_nKLmdnpqDt>()^gQwWpXhn^ z{GaG~HvUiaJn{UW=()xCQ6lwM+Gu+2#tN#sir}daN7r*ws$A6{Z530f9Z<2={{Tii(NaU)6v*(oe z$&dIHx@A&E6sjvmiLelnIDt)Pv#qLJRf-NH9hXgXz4JP!Tp(fZQ_hj7o-?syYU?p2 ze!Q-=OpY!G80%=${dI~NeEcjuhn|1<@$lDg`Ct=DPjo%LYKc=KniLFX`{D;OZ zIrD)040EDbmfz0!Pi+q{tI+x{Ja^<49`zrPY~ct{>i2+2%sjXr+Pm{oE_!kz6Ug=EVrJ{D}1s~Q%I58Cn;hvr=y{I&=f7>pQZ%Us_;aIqb>g|)2J_;te@FZ+zkBuk4O<@Ju_hZ z{@Z186g~>-)UgTES}QRciet7AnV4+%L3yiTd~+^EP**JWAOE3o2MH_5UyXmqM80Wfn9*wd@1|Up4fCV;GpqYX=zt?V1aymIj&bw8 
zQ9C*IBYW5|3aLb<=b^%WVC&<1^Hp5DgBPW3H5Aw`+9n+E=k~D67emB9D zS+32e3KVEuskGn5_;z7}@t^o}uR%^DJ^X>^Hh_yTP2=xBA9(ISOi?pGHvYtOyPm}h zXv~!RPvmvZ0yBJi{%2G4Jgi=}m})#snh87IaV*8K`EFI}v>In>Xq>7T|N5CQi&v~> z|Da~6L%pn?|B0SkL3sWrdLEjfkh}rL7mz>k+-?^Y4>kS^&+W0|ElkgU;CVCgJm~o+ z+wfkDqG)xlqGbj{&v709>!*#JQWM4i|B2_xpVj`?=U?IvCgSQfA-w= zhqZw7zZ+NK(-3j!`7f9&YBYY8(DSSD%qTi4*n?@x+fsawV>nU74}}Gf%C|~=dx~e3 zUewWgY6m2)=ODJ0l)41PG~fD}0#`cY$Alz^~|yeOAM;H=yc-ze7yLpNIk!kD`%Wp+cAnk3#}FB!Y)5_F~rO zoC!k_LD7w4N;7A2?%vRKTVHFK;`oxu+zB{{3X4Fjii%42;YoyX*=CRD7L{o#9$zWT z!F>}vWPtbnN=)60nNw?_V3t6io@;_% zjZzvGhHB#TuFK5r&+!Do#@7ipu-BZ9-)N(7@II%_s>OL|{B43?lp#D9^2Wxs zKdT^`Bpd<%Ih<_{R1MGMZK_~l8jkJFP(tDbM!}h?z^t_a<>Ws?$6)n`aXaTgp99K@Nx@VFUu8gVP zH}!L~az#cfqSK!+TsfnS$DE*Op$|T5FHYmSp44h0_(6^sdP#VD>U``tb(tH_AZ|@FZ0l(zvr0F51@A!{f8MA zDgYfuZOM7sjQh+nlYatTiF1HXI_V2&x}F*?(I`Dt=Hvw0IcFaN3X+J;C3V;1LzZ{* zJWGnglZQYOmrXVyH5<%+%NWv@ergJS%&{aIx7WF9!F(b!r*QM?EAwbICv^#GW;SKY zsTsvgl`Sd2ew~IgX!a=6mS0sOI4KPgaGMSQv4SJS7Y%=Q{@!2l_SN<9NFkRBf_`;w zPf|H-vm0WU6JJfW(9}w_9sP<=CML`*?NC=++4z)wBd>c|M&Fyt&Uye&+T?N z4;K`31u)4a!fzI(FsYST&8m zLHP;T5a=CXQ%Qyc%4G6Z)}7#Rpv=82$6%TY2gkjU3i&$InQ9VL==*kG>`sPWsw6Lr zr8ONwLKxm&5%>Mm33Gb;Jmx~dPPt2s#RNntZLcXMU`Z{N9OuS)wRJ~T8Z=Ikwxlf1 z8#;YCa=9#L=GB6Yx2USH=JH6XC-$l^6*1UoyGOQI?ny zd9*L*9F!=PJHjzdw5^30RO~J@RBWb>CtY92Kot8b$5hWiTjP$SgDy(*Dsl;MM2W+x zvH(Jcv6;!Ql%9yq?A%glxoiDZ|pI67h8vXdRIKEvdL;sOk6Lg$wji#imZ>|i;VN*`$4xpnBi+I^J?f&b)X7p{ zRe>7`k#r?EFD;J9u2^ky=|-;7_Bo3vRe>9KC((4NB=BBPL>lpVD*E?P&nyoe zkDO0Q3PT!ZZ<{BumiChavnl`I9zUPhHj;g5KeLd*`{SKS5q%OF8O<L4LQ{q|<4cdK7YfG7;cjDfn+ezdvT`mAp!#VVx1*k-?wI~I{Dvd-8^_Dc88?L~qT zrK=iKjBT!!dr*xBd%)%28*j$~NlC1;65T#k3HovhnyyEtw7zmQQ4a917teK+ef@Ph zrAKZ?p|erWH3zR8{NMX8!L3GhaEOb0Bz3{dU5;yEh*u}ECQvfo)!}|o07upyZT=?N zO7NCqW$fCo(!`|IkSLjm=Ax7E^hE*;B4BP59*M?K?1(YpK@!ZM{fu>!9Q+J26XLF{ zt8Hxsd^O<8z8x8>7OVuB*w9=sXO?X??H*eN@Vpns%jhKKEd}gCC$orcoi~>q8ZYwf zScy)R5w%l-Ozt)r2=ZG4sQ?~;XrAMcwRojd2dk6HG`Q{e0Qov)J09paim>--i;uTs@zkNfSOKGj<-ze2-?sbL{LP 
zEaS{9z8uwEC<8U+w(}qFqwdAqVu;q!X2;;f?IhTm1X*r+woudx<9*B$v%+0^XDc() z%Rynj!Ain0Qep_fY;kIIPyn>;YJ!n&dNr2Xql6-b0>v;?>HxF3JZ53id+J_nau#?g z*>G|7RDAQj_YQw!F4y>Nc&3+O0povdZqgY`4|NhnlB}}~&EM^r6jdu>mZ=^!s}?$_ zRS@ZF5V!t9-d!t~w*|9!d#s&$^7S#r;pUUyT?S`K1B0SCteK6`s+DPyv*b+3bS|X` zaT#C#H4($MK?_UW2$U376h{6w>JX|zK_VW<3ImTt;{>sf-B~Szam99KDOF)*#)!Ke zob)r8KRIZKCB3Z=&fiDW~8aTdQ*rR~J^IOgxbmqW~hnlm}%qF7L9fZ17Dj0hd-^^V5jsJ0;t2Un&i(vbV) zc0%J=);UR`8wUptZR6hJ2G%dKV7JU|1+hX6&I+21Bm8TK9^M-|mrWYK4M61D<<^@< z6&ABRCMSuAan31?AJ3X``>7#jzQ%(cQiMv?XX3fQv3PC_lbx-LbnN26>w$=>{peLj zS`O7xBy1-3rgjT6*QRUYnWF`5B1x_(>ysNA8*oqfrd5v{qCTErTNOvI)PfYRoo9vF z1FL7H&$6m=%cv5UWdiamX@PXqJ(mpOBpmA~tWvEml?T*bbPSymYy>xQ@=Pj>4-`3u zv8|meJSIo~PIRH1;yq(UOVanS9q3NJ_gb!ffOi-}iahx;R#NIC@B8(L>$D?BPOuJ52; zFq`A;{xIx_jB9C(g^Xr5ZczcFwL`cCSGS@b6XtZh%K33bfCym@s!5Mj zX+1KD7qzrP%Ri`bkxn+dBAwB@ZGGu^*DEE~+Mjw315D2w71kcNCjOY~2R(lqRy)A# zLURV=(-j8sjs+a_4enTn+EE9M8iyiY)ndQWhFTJX^NjLBD0abVdXj4yC$Zza0=Wg> zlM-488GZB}xeR`7Ysi~S(pAaGY8u-s?p3Ij!}Ud;7)%OzspT`Xa>}7t21q2nBWNv4 zcEP(9G4!Ne-1K@bPo!=8eG$wdTRXi{9--&=hi7fzly&Y1E?awk2lY8aK&$#x$>62rHW zpa#yooOqq8=D;9WUBC$9j6Hi&#$J*b>wKMKf}YAqOxvPH+W@w+{s>HY3>mz;Py9i; z$Lef@ISN~^1L3?)sVkbOpevm-PiE=~-OMf1aw(-jvvivXfc((IaEoI+Aoc7~VJP+> z6PRJ-IqCLJZ%MbxV2?vHE{s(4)EecmYSn8ed~vdPc0s%cAN2Q6F9)Yc z|J@d?6%nX3_#YbA&d8)V63qLC-DwR`*)0hr4oIjcWz(uy3x17*L3h=YNhO;z{!+FbR__>2ydwcuUoCV6 zD%)i!UvWi8eo7(X_$$yYiBja_BgoHh5!Cbgj0dQ$=f&Ut&I-4Y=>F`v4`#Q9|3l;6 z>J98aG@ixLYlvXwDm=>bI6A8Fc=oI6qaSSI$RSh4mS7{Y%yD}ZaUFXn3qh99-S6#i zJzV^rA#xZ@s;Ur<==UJ~NNo>T7OX8(SU221j^1^GMO(5WLfhsQ5xFWQ;zSa^(v^f` zI;SPBCMI$-UNh*>^)&WN?!hSK_j6D9y~y|%F$UmdJk6d%zpN!_&~I!Zc+ZxE0(oMdxrzJ2}Q@nToVZv7KmSvvQqM3)wc+DJv`8w_EH1kNXVCQ!Pg0$o#2NG#_ls z$nHWnAZNYK-m-JRGc)&)W;nd~mTPs#_UC4+n*f}}zdc-HY#f%mc1TF&vD7Re)_<_I zvgZ9Af=Xcv28OL3Z^wAhOEzAe$H7TJb5Au~bi~sbD{d7%hux1t6G3N$hGE=DYueiG zQT6o21{4jnV2Od2SQ>dCzD4%2+FG2?m$(ExD?Y?&c+C7}kO({0--5^_559LK%-CKk z0R_04dKj&{jRZO8-36vEM@Tt-9qzvS59a}_k(}M0T2O2oCc&w5&cAU}9v75kl(WW| 
z1EuDW^&jKshoha0M<;AIGkVlm(f>#tH&Tb7CI49bIjhfdJA^z2Rf6dUh(Z_b(rv`DDX*`DHpdG`p zeiSnR&rmwTSX?89Kt-S zL40mQG%G+PKln>TGl1+QdcZ&)v6~MIVCi6Be-1S|@@m)AvFDK}gg!adeRlw~I0Y+m zVI97~{5m3SU%`5m-X9*OAr_JQJ#TG}UW|^rPdMBR& zFhp}nGaomw_X=vx4N=|{)MaSwOv*j29uKr*`vNfQZzDH;DOO`MdPKCnVeC#=sOG$1 zUl4VAcAoU!Q!^W*p&-F;4GA8ow0mX;z@I{-{<2Mv+oG4hCSumLRhBOEL>(4KrC~y) zLLdph%z|S9XA&GrYR+rb<1Gky`}$tARE;+n=QuQj9~EE0w9yL(?-IRYDotydQiuX& zW~7y(z`^eAU0X)0!LV|6st2WzJ90>|z_&)@`rW!21bd5#U@!cauI83ODxwCaO}3?r z5OmR%hg%!(G*$kY+Ain8VFzbPnQAXA;3NDm+sm0jDCB4RmtVGP=ncN(s!2o54ai!=_`FI#0m*Xx#lNo`ek0u~FuI_LbYE z@FefiQ`&!Nlgt3){1HCl(57tXMN+0E#nx+DMzF7Ft9Y*ykkgsGPHrSY{6&Tx_$T~L zW0pZewcV0KL$ZhHlYw%^J(9=nI>Rn1*Uu7*5|r5%yBtbasdVMrZAgazYw54Y<}-_YZ2Lr0@ggsgkqD4f5BLDeowRUB#P{& z>dYp3H@SY1sOD6bIWb#%CX;RT6~<~s6|emW-6U2TGsOgx@jAk6qxU4j?JVqLss#zG zjRf$8IShUF~n4vJe5AQ{)E3D|nAmZ|bR` z(Ji~a&taPcj&&SB33Mll?Ky%7WJ^QO8+B)dh)0OR5YljH3RAr#Y-QJSR#~5De!gR7 zYA6*pnC3GHEqI8wE6(&O&am)&OhV<4h(*NnJn}bjCQg0-XHwoN{<^Q?!PYsS@8Ut{ zA|FmMc7s`B;HP-Na-h(qz1DK+gHsPmB8Rz_X>%A7asVAK+E(`B7VD8-a304H$Cjxv z1SPv=7>RHR8qGHZIaKByL3J#UG3Q)06Yy^YO;7Yb%KE<%L~t3>B*|0bql)Lv5KDdv zf~e|62Qi?`Fzc-BdDa>}5i6Y-bT;ddl&&Wcjk9zo3kn6pY zkyPVjG{#4yy6}>n=AVMxu9eMe-1b9>2{%c@#I}73avhOKiVYZ8!l0Y4q+&0z@ap^u z9@J}M;}~XdXA=$U49ocKi?#t<^=HF;*Dh30WPtHckvdsh8MnVTp3b96Kbx(xFh`fgiSY&ZcFflS>sd?e-9VM$%*}SQp@7sySSY2)dKl%Z%zo ztjHkt%ae^dsiypEwPlglLi`MeME2<*4o9EU_nQ*f95Vrd)_5y{W;|(y-w13LMy5~> zN(@VqPxAxTNZTDN!m_P#T=B8-*w!oQJIAfR?Zq9lj;1w!X$1szI13r&Ap06QM&7mZ z6Bc2vmksq}4~=EN_Rv}X6XrZ!K-fR_xM`O`RcVYF#+)k>BF`yfu9;pAweK^}c_)26 z&W51hekg;l=F0%n*FJQo%Xy&0=Y|kbrq&c55-_p}(%_N-^tskY?`;<*Z>Q%IVVX5Q zBA@K9y78U)v_wZt)!2{>Z7`n~q%YuRomHliG#=;n1e8zylumHl33DWr7m{kAo-=Nr zPpKol+1P3DRx^mJSwtiZIM3-gdg4VzNO448{l5r-me?vbA%AbI68Wo(mOXtU^w&k_ zR@Psx95*JCGeuW8+!`tlJ>!di>4`ukW#XNb9BJl`zFjrQtkUyIT z{{dlWWZX)TsDKylt&Kf8`?Q7Fz8Dd7Nd7?oC6+n;!cMx@1i_>SXFi=d!?5U@ZBS~Q zFZxSf@3rN+`5K^cyu~m&ZcZ*1*B>GT<0kmsv9AdWc_=D@nyjT0xSABTkV4dMhq$o^_^J5A6bfL;u$ z8z=Z@Kxp*B_4QrwH&{ONa19AX9Qb=y6rG--O*#ph)n|lnDW<}-6er1y3f!D{jspOc 
z4x%=|A|$2HYXm+Q%(8Wcg@+!bF%qc9nl0KEK=ej>B?Ov&F44JU?D%>PdICnsDjSLQ z9=6TZn0>+SI|dkI5NVtq7}omn}I3`Y5TqaymxgW8P~`ecKQ)bi^s#R)9)_~iQj z+(M%N;g+_}taAuzB~i{t<`%~Lk6T&@Unj@|bjL4H=9a^9OUI{H8ViHc&jZUXIlHRO zys4BECjKv6*u#e^zm7b{zJFOB`O{NoL`L@E;L~ndxcwN|97`3~5f2oJ=MDY2MU!50 zd8(R7VM+-Jl{~30`!|(BeG*3^pJQSuy;p=uW{l3Z*1ry*XMJC*Mb>u{_WnX%9MLELc#g--e9ioCA|K2Z$(Dr-eAs# zFz-!hK;280{q5eGqTEXhaBr7zqLR*gv4h|4tqOFxH;!KJrTN?}v3f3me!I6+@7!x2 za_>F&+Lul-gp($bEH(re$880+RPsd8XUwCb3Y-;G%Tm3#fSTnhHO6^v1URt<^3{br zv#H_p)aDKc*{r()7@bxuPw>P3<2}2P-RdOhgTc1Fsfw7U=hTe7kf$%cyx973I$88> z|B>Lm&2bQy#yL8t>K5>QC%e(U>jtMjc~uZ)0i)2HFUT3?CLiTnv!xRjstY+DJ-|ef zX`1}NL`jHmilw1?Jp&}~?NZ%1RzZ)0Jbjnl0Jd^N{>9v7STmv3!H{5%DTG}ze9~(r zp1Oevqc|t^se4Ygg@sdn`%**CVW|Xh23Jq(BLfH~=^66sivdh}?*6N1Zc05{*^Uja z#@JV+fHdo)m}m*e5?1d1i%w-o6FVgZwc2YbT1p6M^M)KiG`}Exq&ib>-a0(VAsww9^M{u1H3+?m^DtlztveoCFdY4=3U9mIoI$gaEE`yVZ z>)jx{qkxYdo|}(~<<#`agAWJG*STB>tlkUt_0F}nheZXdwjw6;_eStTM1{lf7@tY2 zT?2MaZgA(#T%2h@s59=FHDl#??=?c~pd!+##w0d>1VSg}4Jw4Ry1_xO^-zwSW+XWR z9LcGs(O-BUYlWY>q4x4VzGux?SYKE@0&UVH?@}J68gn~)*FI4+F&cw5dVV#XDhD2^=r2gux&kI;nsH?x6n(4>aLeBhEa+&rMsnhR*jYOb+ z{Yj5#w}?{B3d-A;`f(GGr~o8k8LSoC^6coeiULcUaM$^N-1GK24v;&f z7USDhZ+j!lK=MWTKXEf9UVM`Gn~NYJZLZ7RJHmFVjE41GNvvs7!Q!DHR>m5`M((aV zi(AVyjBr&#lJX2<<*a4?XU?+8__EGfORuqrUmQotCBakTid;|5F;;e@Kj@pB9Qvi> zJkAMOIP)wYPx2=Zpp{O4Jh^h1Try{kksL-7ViOs^dP9^l4mn8a@?3`lW*ce-Bb{9^ zclwX^cM(+Q@%Q*SA97gQY^1HmmBgB3nFFQl1L_fD&*gf}>6!}ZwAGS7LStmYfgo02 z)UGOOmd5B2rf!~g>1bslL13n3a4ivr1B~0w^di>c#L)7viXU#6lW3hIiiHNo#FM5V zb#R?%@5(7XTV{i>2r7Y@J)@ekB&RM;;uBfKY)-<%#F&;RNlH5Z6PULy_yR#g&UmEx z36hw6oSbf+Dt|^1GFbLs_%(be0MLyS^GoXzssSFcJrYtScZr$i^GpCNDSqAN%#9qh znk`&{NStBWsso+)4`sGEf%1O#3?Jk2Xp)#LP13xk6_H5z9w=EQhexR|ukNX|}pbI+^($GbeJJ?=a! 
zR_EwNcAu@7dPTwUinnAY7O18ZtbDL}Z(zL7bA+8UIgqyTkmRg?8DGm-n>Mh?+z2!T05 z$vlUiT&3|CK*P+gt%?5)GjB%6#EBHuI9~@=e@DVBm?yl!PDg|ZR|#7RHj^&Q(D+Dp zX4DblVE5EU*ld4sYMBp)S+fAv>y6YpfiCNRP8nGD%;0tG{KB zk4|QKdM^|4t}%H1OC+&vKJ%tg@;?OfvxKUw*kI&@d%iy5Mn{`F?H2T6QDZcWn_7Y@ z*`g&GjPENB({ z#PB)9#K`IK9TTt6g~kb-thKgd(hE5=xrk5miD4$ouGwyd#DtMRs89zMWy9jLi9X^g zwvI5nW-v#uo>{z;I^?fKdJ+Ng(G*2Da~vTtB74#lIpSO&O=I#}cg-2;`=J~SE3++s zN#Ww!qCyH)NGC^pHEYgz5)xyLAk9gLR!TESDI!5{_Mw=dyi;>)V8cjc6JU9=x@%H5 z=U7ufOhZT($qzCjJi&GNisCF)RgfS1sS1V@=^aG*626 z)y5OX^a<9~5GB$h5@WQP7O#y&VRgIT_Gs6wFN=i-rc@*Z{=IOSZ9@P&ic)YPe)9^46ZSkFx{F|! zYfutfET2+VTMy- zR<*dp1?(}YY_K#=SVR{)5I*w`&IpUilR6t4foP1(!idD~%;~h%pOix3+s_JX->*Dn zjai}D))S!}b|+^o93Wz;P{4Fk`N*hspG2ATId(3F7MsDklro`sDKik;3sUJZ)^Qym znL8eYTx`^1J!To8eEJ@Q5CZGCj!@!jAyi>0Qx*$%FNEX{5ofKp81HLMhj`6%h22_9 z#n>iA47}&DPm;)3ho~fs5ZAuPWAF~&+fGNM&Ed!WSk%;HH2b&=BRVE)0!xa)ACg2M zNjL>QQOfCfQ~4(Ru6?8S>eA8@YTyflaw}c-0{Xl^my$8LPPo3Giw+PuP~qzmj<`O& zK6e6HHdDyYYUU!DjVl!Ry>Txk{`AbFk=u2u!UYgXSERFN62UmXxC?p-924#uVcO%j zQuo9wlGVadv2Lv2=6aFt6^z;#67CAo!xyq4z{23N(baqPd7$fF;it(I<0Kv{;(AMC z+XXznZE`t47cO~o0_mfug?e4NuqTxR=OQ8Aj_FbWnTenK(99s@%)?oEWVRGS_ben; z1t*y!^6xH|G@^$}KSoXMv(4DuFuoQM&5VgSxpWf-klZ!3nMnzySu$^gG`N4qFkg9Cn&dv z&RxSZ)Of<}o}z&nY{tILI-{{mG^rR#;)@7-V}`A~O}VXS0lz)Rw%x!ynOxT3+`CO* z7(dyp3ns--5>llLz4erv@v*V-gL-Kk5kaX>WsU+ZV8X`P_bV-0ui@MGNQ;E%RSD50 zOlDO^YMjfh{4H#Hb&O$@>M$E~(yaFu8(M6Ksiu_E-jZ(P6QX&2YQXY)>O(S%V^X(Nzx>uhTxd?XDpy}n2Et4&_Kgj z;mU*gTcavqsWfLd5^C9Z$9gQ6rhSGh?*+(&9|KHH8m`iA9I2R6MD?9okRo@?7+#kQ zk!j$xK?Z^mYJr}!hPzpb-Zjiy_gvN2P)5|Z1zO(Pf;O=)JpWEQ&O%6@oSp}aRduGF zVD>m9PfnBJayngU0Ln8Bq)SDlPCF6!+!C&WZ5m58CJl_a!T>P*5n58JMm%Y)r^!@A zz;6{@0czF(LI>}(lw0T0bHZs{qppY;=(&C?owP577IZiPJ|r?%Gdk9*x9&g~t`O+f zd3XTLYpR4V5A9|GaOXH$HnX_5q8^q8p7ewG;y!1!2=p zyD*E=Um8~v3nm`n>^&{-pYsEdkU-<@>z@WMpz_9II&@ zIoXj@#=n5zDDJ=akxz|V2LpW(q_$Xl#65esUaxX;x+Ir5#~6ZvSTjy%wVjE>Cxypg zgj|p%E|qeaI^C`@nX(W?{VYVKk?e7dd|+rjW!{pN3SBHI@93f!2PX+KZNIEcfQS+c zy4>)Fu8E<^5%{AGu9(CJy2RcmR-f{c#oyWwW*hn?a)#R|n3S4oUYR@uvl;lsz?T6= 
zR=NZWno>Dw)7d;K^gD%xvg9|*03$0++HKW4CYanR-(44Lx%t5cLuq{NA;Nu--vLsj zSIvjLlZ{hGwm|MIK^`Y@9RAw_uN^%?8qt`AE6R?wjLnzzEk?HPk49+L!v(_o5@^a0 z&pwut9VVv=cd(?E@d?jQe1ga-qMo>Pwo1Z_##veRx^lSL>0&rZkC*3KT@eqhuiKo> zx@f~^vCer@ALk>{vQW!7!Dn)k3XZUwCy~=ny~`G3^SPo!*8w#)EJo*8Pr6Zw%Tva< zhOAf~QnGSu$5xS#IA9QGb0-I0zTl zkX62P(t&>OJcO~ajHQaHWS#7-6IMyCjN}~#%?F+iHXp>rFxL{+C8i6;9dk-J!Iqe9 zLC0dE19=6KFNE6kmUT%43Odb6;EeI$-0`%{CF8P0=b*QK$nJ}?Tbw0uR4 zv5MSeIrr#QRMoTeor2TWb*Sy{a0i-90>y#DJtr86Kp8L|1bsPLB)z$%z`9Ma8jJ{x z$@W-a#K01TLcggrA$Li59*r}IY-Fsdf;1Stl>nEL&fta)@;4$zDVA9DIo{%0RDyLd zme{jK-GA0~bwD(iYT0tcIYqlg0iTmboN5P$07ML=qhzb}juR=9LjOCTH70d>(J&@& zUz3npA@LzlDsV~$5we)g#?&4o{h-*vkEWv{SuM>}X!T*>sXb=lFEDLpE?k z$keG7niupUUnq#|QcPvW&+hAFxWa1KQ}}QO7s#`nb&(GF%n^5vw!thOGIPr{>a9+O z^^=2!Cx!UX>7wq6>W~M-<}{T&fo74OwhRnHOC^GjaiewU_uLnJXG|;7u?`4$XLi}n zdNpaUo?}*ti8T&|+K(U%*FK-G)AbWWO^OBU$E-|b^~eR(n+opF1|x==nj;%%Mq4+1 zfxcs8QMks1=o-og^oDj$wHTUd_Z&mb0HH~y3wDG9M+E$&E)SvJMu zF&_Of4Vs5Pus(MHj8ZnzTBNLxz(`;A8Y3jzeSGCSkg#TUe`($c!Y&;KEc-9E@RyWGYOSn5M{6 zX%S%?NM>5$>#+4f9xqzpT`7W@l+bgP(|HkHs=?75zR!QA+(h(deT{)^GP}_4R9_zaD)jjRMCdKV;#44way>IQP4J@LLnQAz%fe0K%54VH z0}LmuT_s_6O!X{V^bmxiwv@gH7_C2{oB_r=p-gVV@ln{Q>`WVk!Y2kO|2j~IAbRV_ znLwo${44B}VPbX2*_I4Jo`+hNIZ5JBmnV@<7RcSgiDjxxO`|lDUQ}q?3+vSLi!n*o5C2 za@lBznbp%0;L>JZ1)VL0c%I4_D}_4^#m81dMThd;bde@ZXEq#>PR?PehWxa%lP(TJ z8_?KbB)IC4GFJ_`!e>L@Nr@N(OocC~uBJaCk5T15B~Xb35Swtb(dP&9_~`SzMG}&A z0_M^119^gw(r_TJv7x40_56W63^@&7$m>Z_x+l!+HJsZI$eST(Oa@t71IxExkT+Ee z35MAR2>Jg&UPVB`S)Am8csTXjg{5Mo3l8d1+CQNbwDMGl<+Ll?r7NWBuAr&(hsM(B ziG1?ENv&c%mRrjmtQL4Ac9UU++w*Nn-0nV&x~QCaf3UY4n~#e;(+`+IQd&2#`?XFF3ucNk}h;K`gq-z zan9SODfdCe4DT;UQ=giy>j7`+Uy1 zaBZBD8=BEZdQRMOZLye79Ok?u{jtJqqTz(cLvlTy5^2XlGC6agBCDB)mCt<_hQYLAKi zY1kapKCw%qke z=Vh!vKRk@64}lTt+0&fQ5CRnX918)0=?HV0Fk}sTHwS3h7iP1FF6(1F>4dI0?_ceM&4^P3*l2nn`T9*%gGB} z&UsBju9>W_EsK1eC7>4X=s95uv!LlC5nlYcRNQ1c308H&%s6HjfnNl}$J#w5yF_?h z71bdMtG}cZsl20J9ez!YOorytb8Ww6v8y7ujxi&R#@L>}1rSZ9rTabvP%LZC-~}L% 
z8{SS9wwKOqsgyiGoypKWrV2RIX)K0ZuEZpX>nY5)od;Qq{&i7ULk6POO=0CwC1?sO z0#}t!ZDmfEdS(lDm}NDz=>~lzV5K21y7XoLqXi%t;K#P$sYO&?kjT5n$aCgC?u^8{~f@I6Zu1FvbKsn;uuVqF< zss|UjERmma6-Km7%bdZ<8tYv`Pni}m@0?@MCuhc}JV`pmYvSt~ByA4*y@=k{$Qgps zGgp7YV~}u##nEsfCE}2%3CwlG%ciuD#*vZ27YIVVSzEaSI<$^ir=L2%Bz6x=f51E! zoC2z#)3Bm_m^#q`rSP5tQ3-ggfpuoKiBIE?QI56xKCl_~a0-&tcSUSt#HVE6z6Mq{ z&{0SeNd-w6=dgkDrbliYL$i1GIpLdo;x2+-Wbf5+6<*8Cg_C#^ONt%0)iveQ-~r!{ zC+B-~d*N5X$w3n8`~?p=VnNUT)Yq}(*NhQCnQvxlUU^hR43-dG9H7F1f*pL>-K^R(vY*-nc(Dyk?VSkS!t>13YsN6C*(Kwl}2j z^00iz%GB*>E~X=k+=F-JQH+^)S}BSvjI0<-PZ>Md=yg+OSzcR>+b|-U)G;SBmUgF` zcrs!Va?nU7S;c}@U$PXL07fas^qjH*_d<{!-wd9Dqs&(3GZy)tl%$l5!4*a_#hDQ6 z;EFm2{%4L{)v-N0ti8af?H)mylKR}xPgS0hX@Fw$h;cQTIhXgkJ^qO2buxTLk<7FR z0>gONZ3dLp?=r7lVkS}hR`4K4xJDm93M-{2V@2c4gqcsM1{p9u%i=K1*M2<$Xw2-) z9(5XGWK<_srNP?(^_?pA)pz?f)Oga^5uRz9u`^C&NbwoTui%C9_uMtq1l?*p9$U?7 z`N!{(NTLaK#Sxi7+pnG--B`AS^c9OfvJC6XWL>z)<_t33dfu1a*0^7%V``}>28*w zB$hF&SmZP!a(6;&mQov%!$Go~a^78G$?9nnh3FuSU!u?$kO}`7!!)UKaAtr8G`L(1 zhzB5Jc+N>#B3%YOf1ebF4mf_Mu$Kdm#sKa6=AqW`R}jie;dmrK_5QB-Cu~f5FrDvM zm;>){AV<__9NXO|7K-b$k4?`FFeLZX0jNond3WQqdlW;_lX)jQL{_pNGwtMWSU^Se z3M1(0%_B8d8cwN0_skp00U|%fc#_XLj9dnz@o=@q_fF_epUVER^C4{c^hEDg)+4N# z1idoMlh(;6k`L>T=@llBLj)x^6<)Oo7$iI;Sn=Sv0aB`lAwW)Djw%XZ9oh;6jmyhT zD~fS0VH8#dbBp}Y9&Cqdh8H6o4|~UiVG%aHI@ViK*rgdEc+*(lUmN69dJITlCRSH6 zKG#A_enH?ELYzXeGbAiM_+(c~ZC#()*~Vw$$&eFqgxqPzjRmY4d1yCMre}20#n)sQ zYdpS8F423B?2X+?AFO8<0!A7wJt}9)jjD__N{8EkHzbG>OS|XPhlk1-Jk0bw)!h+%^ zlnD1u&J$_kHY9b*M0K2aGz2OI`7U|@TJ8#88M3GHja??OR1`6{ji5Qv||J0LGW()li zAw)j$xriAa{lBiv2%IWK>fRsw1i*M^^YR2g`&kAPF+A`qfin7!7B!L?pbddzBd3Cy zg7E{)=5j8p5yPF{8b(L;OsFL?W*pA<8@n;rS9<0$kmW-75)!Vbn;5&i$4s0$p9vd; zlq~XGF4uGA@Lt1aarwGgk=7?C)sP?(oF3Mv=N*a`*@UoUfMgFPctx|f6LitVvH+QT z0`#4r965DAK1HlVClrKu6B>-rGu<&ZYlo?d%*JL<_7ARc9$qmyq^gDLLeFoHzI${W z&&R|&(I?!TB3Gm5&=H93ycD^9Fxfd&4ccPHjP6Z!bwIW+oT%;$NqVP53nNG~L3Fvr zH=W*FE+Q<{*eNWDv5dY=oY}o@wOuwBjZMqeIvUjikQ1lrayyXr*V#Dy*&w(BQEp~0 zLt?$6Son|3{O#gjG0K<)4dWt1=Gn^YhKhBuJ?5q^6v@4o#@$11vbDI4HD|1zOIBhF 
z=4>NrJlrGzC}4^Pt7a~P)|#bl?x8Wy3r8@yialrk2m&(bKNsv~$bgk*z~T@uPB!%M z@I++4FN2ODO6mm6^*YW?3n1sYgioB0J>8+_qK&yeF0)aRe(M{$ST_zm*XoYRBz&TR zy6Oqu4Y2yCL`HOtldLmXm3cvSaIfq{riU@@nB?g0IrPjDc4l9i%m0p8Y|S+-+uTyg z=~w|?8M(#^V(?CQiZT(v&e=!Rj^jZt*lk&t1i0fzQvTqf-luHzpy+IrLn!#}bi8=0@sU=JNI+BIin*i8%GL8Ui*kX*;YYIatgV2F;6}N zja_s}ZqZ%x(oH^LK4pI)=CckH#l`?Dm=~6V-%H{5tRi!5J&vVd-KhkzURFdL9o!>U z+S-VsC?oZn6DJ&VTk^XcTHHMsJ)5!M^%P9Nf*1H;j+0eoDacXm#u?#jX#8|`t8A3P zvRBzZTv0GL(7jv!~>>wxDIoDGd4jSgj&oJSUg%8qh<_(8_o1}J|B&ki~8dlF3&gf9!JS{0; zDu+A-_IA(V&o#V@X-N>7uK^)1l9!_XoLy!G|E z9Xgi)&T|s-ayKnFvMpkF+p@KL9$})EKarP9Ik=5vVfSpNCjdNC$%}INIvC*Zo6TN!r8zmxlb0X* z5m3{2HU=P0sv}gG6c>i9+Ty9rs-Bf@yq|)!beQT{AHGg~Es{hHi_x@hpaDx^<_Tu z11&;dyB*ZW(KrW9q-P;N-C)+Bp*%4zb^wDo;Q}L?qb6W*57ZRn~NwYt6P(}b^@y~>Bwszx6gfZAkCT8vI>9 z&C{mfaSS~dMw#n|R{e&^zSfZ=$pSVhRHI<_ClM^)^dWvWkTeGMzy!K8-N~~U6OOzr z+Ns)0AqgR&gH&S-J)l6wFN&7rhoX#wExQL8dS+jC7J7c!UFe77DX(YrNn4!08mGs8 z;E&JfN}}cXXE^BxaxxUOZYjru=uPq>Piun_dM-=9FciCUgl-vS1E>kwG$t%o;}s@*vB{0}gKCrhXshn~NHG4LD9VYG^93N2@{YFpiJX26^19y`p3 zX>i2p*j~b)9e-7JLUM6TgD0m9If%C&ZCZ$v;+Khec81hI(_4op6ZDxMc(uB~+@{Wg zV5j$dU+iMIWHh=e4Sk`7lUmD0GMHifreVoB;fIq=?>WZ=U#;T^WGtmP)QlXVkVh_0 zZQ>eC5q*R@@Wo6jHyu-$c{Ftv1n&(VA&Oa&>DDf-JST4YdRF<3aY6Bzh{u~*vV!nt z7m6dV>^>M)*f6mJJVci0iO3`-D1nJi4#mUxv;v1dS~0-N^x^r@Qu)#(`YAmq_0mj` z5XLv1D?^9O*i!qZnhDOUW{%mT^v>%YBusSGnDsR|CXDNj`5uk>8Ok8L-ZdXohtyc3 zFv?;S-Pj@yK$w=sU6)P3RW*jE&&EadX}%FvHnn2j7+@F-)4*gxbGY5RnQA_mOr5wH zG`FO`Ht+r?JLd4o0NEHxyy!!z3g!%+m#oZ``j6A0eYMV$&=?<#Jc83=Z~i-a&8a?a z@;A)CLueE;(Ac3>EK`^br;)yq^e5Ap?yJ%|Gk1FK?Hc=H| zO!v#*F`G~7Fbf^)5>;T#AboE3{sO1_?_@?qsBz4<~bYqZ-FY7p*K^c0+ro3-x8G@ z2R{Qc#pUvjmdyS!ht5=%-2sr0kwo9_IrRK7_8*=x*We}e97g{lo3F!_3*C%fk=m@3 zId&&igtcRE;XGlds=VCiz8-=2gV9rczcKo0dW&x~viCcq7vgxN#iZZVL!xN(wpS5< zd8CKM@v?ZWvK$aWLenlCwjl{2V2}`SrA!uUU7Ez6G>m3(x>pE9)XLEFdxNH3J5aVb zX>)Q$lacP^8Q|c+PR?P@$zGt`^4MvWZ_5_P?nXt>yh6JYB-Y2?F9#M(b8jpwS;Voc$xrx7Q0VATs>|wd-EqFUOwS<1bZNMB@{YG=NZs}EZ zC1-DsD`VGn^0mfHZ{zFK=&7$SWa_#>c_K>jAV($@jnkjgr^nZLK;6JR+E)}cZ)`-A 
z7+*6+u%lLgLP_d|L>9QxX5)*jb<%;Ds`>SK;#*AjxZ!&5ZA^lK#@!zS_yB;0UQZyi z)|CYt5sfQ=c6=?ZuWpba*f$6j=&;d<1{wzgGH?285W4^Tk88JsvK-S-U( zj`8SiC57=0lT1QL9Su?JB}C}d?RB3qImpif398&FZ*S~ssp6d<^qfb;R4NiMiYEou z=JKSux;|D^W&887!R3|1iPDoGjY-^=H{!<4dI%{t#p7Efms?05ZutUGN2iToq%Xvs z;HL?OtbB^tkh#>y)^mX|5iV#t(0dAm7e*b5F-WD<1~#|x^+fnVz1?x8aNL?->YJzQ zB*ORC6X6t~zM^{_aituuq-$aZG_H+a8t!uH z31^uy3zYEZEhzEKb#o$H3*Z5&Jdo|Ee4Fw)WGx8i8dY(`8&L4)+n+EGD($B0Cn%)ELl#Q7s60W5M6$DXvd2@i8lrRI;R>syHxw6F-6mDZ zMdevdbp|8df-hk6_whkI>-+dTBj1jQ@=(G><%wm*mRDkq8w=GJin`nZy$KQniR#4O zM^URzdI*vkST5M9CSv#Qh?27ate-G5KC_;t{edO;ko; z+!-k0h7WVfYSfiJR`0Z}4cDCgNfQDqEi9Mk2l|keOp;QheQ|Wl+U$Q}#E>M^(Jf9z zQB`SJCM30rQ+>iQAv1T>Cm0T#*0X0bs;VLt@W?7e=oX2o)5K^pjbLk_#-ywaYSHrL7jFw8#n#Y7TymjSBaG z(6CYsN6PO4kbEhtLLN~7DA}pQ6b9h>1CsT(5}w@NywG$05%R^|C%C>SIm3r3zv}>L z{Q+|WDKU{;h?_}dFggtuR->W|JyWBI#58~zUIrtSB(8s|en9mWYDRKsslyy~cA_nu z6^)}Z`PA~@j0ghczp%{Y%FyrzHchB3w=IB+8c&pSYSL|YUxIDQMZ|&alZ!H3*qi;%O(^rm1D0Gz* z_u=+D{-Kn1$V9O=y+-NG6avM8jjYXCPGb~CJ}3(;FV(uSae29zwBDEN*5ja}=+nzp zQ&R;Jc=__a^cf}X8Q_nX!?Q#IjrXPG_rvwXifOhlU*0z+C{2~Rk~=S7-j`hgaRJWE zS@-$n%lqsef(+>I5P~{z-hOh%_s%c+(U^vgBT*(nE{nleiB+TPL3I)$AHHX?J#aQl-;}S>p z6`?W|CQs9U?3^4lU>cuLZ!bRV&7xsQ*3H1k#dU$1;#{7D}QK2%WpHOQU zU@+T#eNWYX1+3ykM4Dx}ZY-~lmyM-G)QZ&XMCOWN+Pz9xMq>3f8I9(u04&i1Brc)hX z4As#9p-Yms00d{rQummGNdSurJO;-b5yP~yK$?ICJ4Bz{9gat=2SFWIf{IRInK(=vg z>8wRv{b`(?^Rro$+CCF_0{yQRQybuq6|Cf9!tuleC^Sb7tP`nn)cYpI3)dX}(Qa3V z*^&K35b~WiII0VWGEBjRA+7m_+eZ9q1SSGZEGAbC=0h#yBW3Q0#gG#HgjXr5O$4Hk z{TD%w0#?$|WdTGeVXQ(Te8pZ0maqe_p&Bap0JQCrUEF#kZ#5!XdUJ!DAtJJ{+L1(v zoWu0Rqdazd69>#aod4oqB$tJeUF}_WD2;{zk-qgD{LAA>m89t+z%|wn?&Kb;ALq#6 zZK+*GKqkNhc|aOrhYd>JX(L2jp%LQpQHmd32?3^=8kj*4igR#~um;#oboNtbLt-|@ zRbny#CVfJ$Ba(`~E5>*|zB`NJGNtUU&M}URU_8c`&X@6%`b%#%fLGJe_?ZE=BhTO~ z64}iVMc*+U8ItB1@J%kfw4xNwtLaQL$Q$CyqOm2w`vhBP)MABTNq|K#-Bb4703RkZ z7Goj6`vfZ*5!Q=>;xm|#29G=l_OT$ELAtvT;|4Gbq9~*d!4?`NJ{@&ZXmI&lrt`Vv zu@(~XXJB2Nz2wsy78X#dR(Ks5Uah*qB2$tdbGYp zn@9l<7NsA;=M2&qnU;VCZV|ZI0-<*brYd*SH>HTckW1r-K{bO^@uxHzz@I{P{f3wX 
zNN@re`8Z8yngIq8&0tJW{H$YPX9J8)9NnGBHjP|Dd_}H#{Dk&j-63;5ozEGx_#zYl zI5}XhhW?!KNmzoqD6s<_}4Kprjr>=6l&dtpzF;sq4B2$(G224 zCw2hfMFj3hJ#>Slnc{L0&V#=uf?-GM86*saJV1c|S`f`3A>O2y0*plZAXg})@d=bK ziS#Xgu08#^LwW{@CMF0T;As!4LD$C&lF>*CEWqqREHOQu&lx0G%p`Udsquy$S^OLhM)1xA8*g|eR)K6*fH%|mm_dAqMce0rsga*|D=AT7#6D#6SV?eC7?Y?rV4*)UL~kUWXFB*@Kt^R|~=n zVlh(PNT{)B1=~Fy3e}>}bQ|DLtw7_C86=vRdP)F)axj$@i1_YJkyLL=#{&$(a5?TS zh~JQOmaAG$WDmCHxt1cx2?8~Pv-r7R#~XIf!4?H4@iTjn$e=C378)g_lhhx8Klb3X zAksUYWh%bfh$sHyCZUc{*8I0xL89ZYYQR>@&E08ND*(aPWXMW}&V&3r6lYEQ!cRu^ znnh(o-cmfh@-W*0EEXk%;8%L27deh=p@I2nUf%7BHgC|1Lcm22%)h(zNO#xkS^Qk` zi28oGsy?+OZgAyKA^Qm6-?0TjybCE2LDB%g3o~L4+tC`+#(+H=pLewn?4E z&(`bcY5m(964yZ!MSxpLeXs?Z9{3W+T{Ds9>ahwx=iW--fR*@JzzD@OOjmXXbk?wxt5;lK5MOwc508vxe|;>BMnDGSA0ai#BuH>@wV z+Ap#|^oiT_NSqS+Gy)#?_1bhDL-gAGekio|hMTFoQKcTFa&4K`B&!@ke|ZRHXyh z{H$BhFZ_`h+3Q98T$^VP+d?og6NFV8c+g|IWi9-Xyh-c=058JzO|Hu48~UDdvHrX| zfS|{?Wz~L3#yMRD8F_8z`Q(ptw1UgT&$6X>> zNcSNk=>cwGBKh$qNoq@vrn602%Z2x)3x0ZVqKg$ZUOb3M`@~Ov96%$G^2#uZX-#cD3N{NW;Rs6hN-t7z= zq^A@I@iWVd&2w4aj}@)p8&C%Lliee+bLmUT69W1G-WP`pJq|Gqkq0em{7DZwKbQ7J zma*ylCp~(1-z1-hy8g=>`dX^WsvJPjgB*z8=#eDXD`kJh8^6-SSI7jwk&XngM%+6d znjWov-NetWjsM0UZ7aBmpB4A;7k{K}6tz78{(VCg2d0?H^(KCfIz_Qc+{4d1x9vs0N&N-{KO?s=`AvR2dSvnQP^YHb_RJqCWk zef9EdT(J8-NG#vK)yp<1KP8|4Ql~V2f=jRJ%D$P-dIof*JGMKRB4GbMfZE^v6hE`P zJGbp+$0y*F;u-+|rcO}+-z)l6AzY+Y1N^r-rEs;{DbJG6f6}9KfK_VsU+N{LZla4m z#m_1b@|S|rcPsL_v|*vg$8CF&fUU`pRV3c{l^%A*w-tiYPk*SF*^kM)p#-CF{)f7q zWDKf*2>1`{ly-coDwC}N@W0f{L@BG&EYyg7gXpT=4Vwz`1Y!XEuMOLdjF;u}IDWig zKkNm4w|a@6*X;f|g9NRSqy+HihGj*^{g^=_^;QwII|5~9oR-!=V zZZ`M9uMe+jyWQX7=bW$_eIn+Ln{I`1v0$E+X?nI`cCo3#!=eFF9{W{wDRo8Tl82Rd^J6 zEteP;-s0z3hAsqKXw-Xm+FN^ zN#D3~CPKPjmB26iWm9g1Ttxw|Y>Zx#=?!0}Q*2)(yD;zv4-=Pcxlv4&asZ?j0?d+Z z0!M~iEzfVxLGEe)J|^13s|2HQO#HUw?OG)!58&z6y%21nQEylZzH;%7QYPnyivhby~% zZIsX(g7Md@dfWLau^`F+xeI4MVfdNIQGi!h4~-we+cStiMM;DI5R3}nEXjW(BK#M@ zI9;w|Ge>*^{J96$nn5m)q~`(5f_#uGM~5%hQK)XNC)u`=-`NcE_>ZCHHkY$wE|073o&f`5Zs>0d9_eZcT0`m#SVk 
z)|wplWTD+0|IEi_vY;@sXpRy>ehMAiZSjW9%J7S~0W@F^azNJ)P!WcJ?0O(U2sg(+ z`*pIT^8i8P8K1T$0Z&jqeNi(()NRVwZ^x(ZGv)MSvY_THzzcRfupOU@va!j)oWgmU z3~2}-T#wH_kNYK@91}A}H8b5cdo8zBzAEc+<`~d&T z14$$!T~#<^md^viqvXI7sM zIok!H*P9Zj;gCMs9RCa$7bbEfK9s=VQ}_~&heQ%1ZrTIpbiL(Fs@vY$+@WyJP$=0<;W{Bw(fjCk)6dp9Oljbc8+nSv`6 zdz{4MS_O*!d~^JBjmWnBq%}_=-Lis?e_oiA@*W=#kZoO8X?miA26dOl(z3l+Pu?}{ z_jJyoa=&qQ&BN&dO1VrAG}k%)Inv@r7U2BA^oBZ99RIxV^ZR^8xtZob=}xLSAgkMnlRP!29Xt~S@;4Cu- zxgMAVn8FJIPvKtLW&+&Ol9n`|<9|~&^EcyA+nZi5e5u-t==$sM8LlkqG-<0r%8%;^0P*J<+ zLLfmyr-3TF)8MH1d5V7? z$J?3aXMSYD7ZPTV!fmpIwKc;==3{ppk5J0 zIfL_B-yoZK=Ya%0($bTWF#!zxn9zqt(>lN7pLI+q!DV3`a$To4q}<`n@y|K0&vYFg z53ps``_(F2B13573mVjEX3O%m5}^GxWdiFOLWgF~l+fAn&&%s;#nvPo9k*_dez5g@y`?u**vgXq(KeaH;muM17nd|jd%R>a;9t_s6|S(quWXHc{`yE zO4Q-4<#zn@yZ}YA^&46Vbo?_ESgqLhfo6WkKZomxi_qejcofHxAl4g5UCyn|@y`^T z?oA|JT;eyt>HtAfbXe5!&*uO`m0jRufZiPcO!vvU4A9_gZ{!5F(OehO^OFhy!VHN!?Fusda_sNO|sQ39@W_q%hvqgyCF zRz;cVVa?Yxk>Dq#_ogRnr(B6}uZ~vscs@ah~ zdI0kg;+LQ5y2Yqyar~jfmr7vK{Vq#;zpFJQrQ|`H|M-HUI4Cy!{m9=J{;sP&^8nbj zZ;tgP_7wwDm6R{|owK*=X;B)1&%XGO!Uy>8sfR_O2}&Li%EdR5{&e(|u?E8nr6!>^ z9WnZAiYqa-xPutOBIKfrjBu319=N0jOL3`d?Jk^1moX z0Qtm#wEa1%z~h4&V(JW^o%A@X-Fc6oGB5hl|AqkL-;#x@cBS%}c>zTm{hLc*qf&k4 zOeZG=0#RIAy^}V@BQaV)HBdUnkwm`{{>|!PVE+LX)zeVPX4li8h^Hv``uF-&L=90kvNP-0GoPVlpA@(%?q)2L9541~XWGb*~Zz=XrKP6JInD{3ZGSt8~BU9cD z>gglMt)B*>s-u2K3VulA_accMOymhVD5 zO0TsR&BVCYMyKg8###@n%EhpNPY3AiE-SUr?gl(o5FM~)Kv`rMv_UMhC!qd3jk{>A z4lqDerfFUXzrpH!?gDD|Ny4#boy||4d_7E~!&$0|Wm6;4EusY?n85)lw@ z6|ZxRLW&A&fvN5Tt*Nng!jwUuNPa)1%L46#RAz$Cd)Nb6PBt;C_QR@q5h$&#cf@nW z_ZnHjnv82%!pQusVwYO~H-FPPJ?rMOJ8XMC^b+#7R!6`W>fh1uR|R0SOq2m)U1dfk z>>qm`m0A27N_X!AS)eRb608e?XN`0~qKtAaiJU@=0!+UV>`J=&w>-Mj_UPMwCR!Ik zzrvGJj=FE~mG&0(;3$iZiiDAi4g%Dmg(p?bEWoPM=4L*yFe^%Rm;zIUw(m>6DP@Oi z9l%$#C+XtfDy&Y4+-k!^2~est{E%rDj|4&=;0vXOiFS7pQA%mIb?LyCt5rKta3MqT+mq8M@5F3bCVZv z|4|VvaVSzuodk3Sm?ONndE5>W8Xi!>TD+9$tT4^}>7j;C8%#N;CM<=0)@TCgId7-g6^D4gW)+6koQL3F7R2RF zRltf~Mu;V?PF(XO8}bozTwyh)@9yRM-)oUCu@1rs+6QEA3^i+`Ymx{9-}y&WGSO9aszoW 
zO-yr1Z?gwI3dCA?1U6Vo2J~Vi#vjyx0n=K13aBmP*6+kCb4N?* zn8#6-huJZy16siP$@w(Io=X%FXmC{dBTH+m$S56{G=-N`xZ}g9K{h1r1>l10u<@`t;!+RsB40Mrj|S2=J+UfWVVTaI<1A z3Zk~p;8UeNiof>+rP=9KfR7FJKYSV(Sz{@ zcnT%lXpcKha77O(aW$L77v3(aMpw*h@`WCnnku5!U8Efh7gwW8p+C;6A5swd}@WF>&-neR=uEzTG5GbIPdF4@C5_j>AG zZgCzeaZmQ=72S?Q|1X?nHr39FL}KOsjdNo4Zp2xF^C*PYuO4^bFA3G$C+T*x#d#=7 z{UC0VnA9oNGwAG5R&d8N4Ft>b@it782IQ*D$Js#Jh%q>_OIpdk;Tt@_w(Kc z;`R0?UB%DeYHBwzz6$QDfv1}+^R^sCC-l*LnHg=bs=!0hS}3u^U+Y%zfkV5FD4dQCV1y*byucS(Sg$0Xik3kK)+EwLD&Ti^E3BqimLJj zRkrohK-|-LgX}tO_-1>HkpMy79Jm-`X(WdiqTM-tIZrqy@Hu5<@iSkmU7!t!YcRgS zJemG|bYa%W0+Y)nqxr0oe&w@@z1}#BpE;|@ZtX4GfGFGr{-|x8?kLos%?|k}ulszp zTk9r%E*3>6-s}(<#VnelFtj!Tcq!65Safw{CRLByCcrxRhD09KeaC}`bRL7u057jc zGC`L!CVLIg(`;(c5knG5TK7E7#@at8W(<0dBpz2&{S?khBCyX3d=s%Oxy6>_l}DxKKPyE;Am+^4WEYd9g@Dm59Ry z->lY*D-*Ay$RwqASugfmZVe9tUI11<iQrTEB5Xrrjd|u;wlYIF zK!kSp@)WFqk;mg!q_^Zm!i3ufciZ=ety9~F7M7~C%<)`)F6v?cTzI|c&B8^8IJqyH zc?+$^_Z&k9jWvzJq;2~ay23X)^8Ky8V2m<=pO6MIE2J15{|ST~R#dkbMXc6caUs<_ zMQbZ^^QbQvd$)8|EHWaDFQiDpv7xFA&V{l!-FudQzB0+7u9$*_TC?9_04X`pn zSv9%wE?;<2h-9e%H!Py6u%52CcxD#4UC;BTOxO^uf0g46YdyWbNb3ffh(9hY8f4IVaUZuQS-Ub`Q3|ud#e4;k#4*-8-MLP zHZ~Yc3-swlN9ya#Q;nKx&o5M_gUi54#+LLkx;@pXslS@Qhs2h>BnQRqsYXr9J$M42 zWMCHmMoDU^h8#F**E?R$CwOGq;*bKED8c}X-||44f9T9p54_RfTA#(Mi6B+LDB>Ig zFEuGO|MisH%0>TICP+!Z1qIE&|5bAs)R3=BNBUx}()`y`PPbH=v;kat)?%*e8N5R8 zzLDVR9uoxB#R4@R;P%>AF0QRe5zC97|I3$K)JR-yh#*F9>k9z^0k*Xd5Bf^nNL=8q zD|`FxC40M$b3_XfO*9Pef}-?d_KCKr%>^N2g{6Zn!1E}gOg&g; z8W}4hm2WTcv)8Le8K#wHDLojE+U+HN=C;tcveM#sutqN{OcDuigsH=#yy{1x4n@KM zM(5L5niYX7pMzvWQGMpd%kL5yqmhTGRW8Y_5!b&XoaZ5??rIEv%CYpR17|K6hb zVCu?pcMxOqE3Iz|_i2mjlN6!4JBSf~COYi;Je(P{^)3f7>NqQI-Dl94!R`)XY(ZGl z`()6Bz}+3h=%p054s+PdXHtJdIt6_#+}aE}^V#t`-j90)G5*PC9C}eK9mFU}R;cXp z87=HYXQZHm7(e+eIuveLF8Lci8>X*5Ig(Rxr%DU^VFz^B*@^D40 z7rX3(&x%wF348wN70ZS%pmB%99#>m62AMmNpiFRV@L>%qz>v_kSu_!K7OR7`6(z-O zHF?TfC+Rg_hdy%Yi#2gk0K3jTVK%ezY-W%qJPN4<(ZHt+JN4i=FJl^^{TRUWC|}>> zRyZE)4!qobbVz~1kOwnkm+h@}Ox#Eoabpx790zQ_afBmCl`n#C3~}Hs=_N|VC{ENF 
zz>AUdlWfSpPOl>(17KDOvMq+Veto@>4s2PJhgA7orYF415GRdG4|ZGRhg7`f_}JL- z>W$A7WFSTk;C-?!cD!pg_`@omu0u7-ShX2%XC@Lt@j&rox!b50J6_0^n&5ZJhDa0< z_kt;hnduI?HsSED#A>4$>*mh!ztunbq?EnmP=^!O9)N9z%z@_zHz#Gc5`0Oe8>4Ja zN>qI~pz#l1qE`l|tY960^3w?mcHZS|19H_bSY!%qu!~b({H{P(!t2p! z;tzjc2rS#|;M7OBpJ|bt9jvZpC@&gck>U5{kp!GkMNnu=JD6bN+Vs;S9g*%%eVa8w zP7t{{ZThx;x>Mh;`;50})1Sk$?$lQjU@*ufo8GaR;)&*2hv?3e8Ti8!u(6Tp=Cx(W znZ(hWTg7!)0i)kuaQXT^B9hGvBA#eA==1kt6D=?Pc$2(%FO*DW{W`pEP-7n(jzYhZRj3`AH@pE=v^MpMlmJ`AMJ4)w8{JflM z2t9bB;#T`0Kov?X>_wd?|2_y<1?54Ig@(HlGH(uoPV7)7=|AN}iX`8uDf(|Y(Mbf= zbaBWDrH>+aSnZM%WCS56+VO$0TDJ8`PLR{oM{Bcbihg6L1(*n>I$B*VYNluru_J-$ zbT}_B=_FX=v`d^`okd8CiaVVYH;Vuh$!%(FB9Ejh;~lBU$QxN!hZSrSChEicBy01c zTI}p)-qM*XCp8ydREwVfddl63`gGCA8!0(95lkfgz2(FS|2cmaj@n})8)t}0J(2Ba zDYJL-Sg<6Jg1%NBb91rB!>oJ-g$rUK>rDEW8$ScDB#8>VBF7V7i@?5*vh_W`+KBs7 zYiax`{tpWvU*e^gjzYW^J|}GO@9(b8@)9rqpcvvUET2Q2$Q(!55BZN0iyRmhD8$cd zWX7R?q;5Jya{)rF9@blKW2|Zvcq)qsrz^mX+i&P}fr__yxp6Z(C<%6fOz3!X;$L}- zmm9Z6N7@vD;gAk%Iq?!~o>k745-~gNY<8D2Q&H53#`LKGde-}+KP$1 zNk?+dxK_M$Z>&vqXUF_|+_3)rCn~BCnXcNtH4$oGh_o?8se65ve%*=YE_(E~;uBm6 zX~c6Uxq=`K}t^KP-L{PQd|Q?%kD>g ztUww{SP#m98|fT)_E>3*`*3gh4WXq1X|{c{(Q<~yjp;)QfpUnP80lDr=dJAN37aVO zg2)QtXixjn1iV1_6jQc7?OxDlc|3Fi$!{g}0l}BDelUJhe8&JA1k3&!#Gi7hc#`{U zmVTV&@#wjau~(hRX$R}fj~8Y*fb)7njJIR=U7j95rIkQig+M8x2HW35A`O&iL=Ap@ zZFs5ixUS`Jj*JRihFszn3M{^`RN}e6=y+8AV=$a8Tb%KPQ>pJUm8#;(c#$65hQKn@ zU}xhkQkMm23shPdk+TQzFa!bUor?7OzQCPVSP)096It;eLkZ0U5OL@Dn(fQ>zPwYa zjXX9ctYkP9Ec`UtX|fNzwIy4W^ClX^kGZ(XDpc@-!pJ}oaaiue^$V$3*oA)y6c9^x zfHi?Vj4UlfhZ0heQIo3W(+Ve>V@croA;p-TmXJnQK zu1Q8G%?*Li(&YCQ%cfLT?#z8p!F)rz6nONNUxWL;&syP(>S z&z;4kP>m1RbxD|_>RgmmkYs5)AbU5bnfgjr#x zV{S3Ms%tTBn;-AZNYx8k zRFTo}v1enRcM2wMNp|ZK?46YKAUXwL_Lzf4pf5qtg~q&=Y4b%GA;8nZq8hS6N#vOZ z;7F7lKHi-bK>r(LX8U|du>beAOoK>uvlg&&Dzy6hJMKF+f8t&RyxlhI9oK$}=TpE^ z)P`{SeDo(rGl#d%L^O`<-I*}GPkxefOJ1jU+KR~$0M^)h*SV?~MF*Z(!^2xWjy5Ug z_hJ#Go2*zX&#Dg7i)@2O`b%QFx{yLAa+Abm>)YV^I#7JD>r5oF*PB|S&1YhwS|1dD 
zsg<2yHx8w^cU{sO4g0JR-1$uGBaN^^?QryOpRNePh{hKi_(!Fy*}&c{LRg-c>*jiG zTO#He)G^LGM)H|gi_8;DoZ*l1)?lC89o{)x8eV3=?eM-HX?VrBGQ7F-oE>?W@4pTU zhx`uRLIPMbkr3{z994VpnaWbbAE;u~a^S`**%VMhmkV(nHd@FfW>^euxg>Q$aaM$8 zbdep3ISlHOj*EUaMQ->=LYIwC9ra5%Uv3ZnW=gLI`D6zi|4Mdv+z-gP2d384a3thV zIKMbziTr+GI61!_m>y98$@iks5vdei%b2Sdz?|qDi$dr1)b)~U(3l+a8Hr=W4T6U0 zKthF%VhgktFxf_R-aZ`KSHRXgObI&!L}4i^KeuG=;=)@-wnARvc&pR#+NE$%xxGr) z182q(1Gj#p%QC`FQmN{`DBO{c7oUYwAZQnVc}TOXD^^F=Jx>9~oZ_jY-)4f+>?~Cs zX9qnHxLe>(d3Z>(D_~H-iMAq|DsI34UHYQu+L;T!ySW~wNdc^k>YM7rT#t{mh z4j!O+c?dS?Ar^#F6Z!4ro!OsH!BmLr0* z`HD=UIyFg)0{Dmn@``EeN{R=1#;N~(({+&gelgNwe-SY4uE?`=xsm6Qz_Thg;vK=G zq>70o&{4CLcYn`JgYhh(v52b_YkUB@diQx=tZNO6fEW`Mz?{46iPZ!2 zogNQBA8%?^xB-oi$#J#r_sG=Pz(~G@bd9BI`b~(P%K--@|Hr(u!IEjoy4?>H7jyIi z?3JntbG11A1}@rqASGX=OB5IT0a}}lY4rn4J+as9J7LnV6EDy^>f?c0K|HS-dB#r< zRH4&>Qo5E>F9=|-lTz#EH}H;rLs|z>R0v?0&JPA)o%rnW>S}CV)ZlFim?K;q z;LRSvL^dA7DS@;oMuMOKhuJ?dKH(vS`=UotmsU8l5ZnTO!I<-+xla7d_B>J5E*wk{ zcSU+wDG%PmVR3~1J>kp4dY2-gHyCz{5UQpf#pEOW1HOlvPP5czhy%spZT zNxZmfC`!;;Onr>V32k0U5$YEenHJ`Ukv<>54icLdzF2@~)+AP^CN<6D9nz68%LI2O zg@!!X^^grqVr>@XW5i`1DGc^HZ26fAsbfU1?1Qx}d*|yQem3DJ9Jgr28~r+npS1@` zc>5T!2Z;&ubr3(->Mc0V+l)Io8IBf=WHbQ0A{{vPh&@RBnKa}AnA78PF(j-a8saQ^ z9R@#huU(HgF;yvDuY>q`#*u%H5luP<$wi**TfSY7xHHkp=ixdDut*1IuSa|>Xr<5n z>r4Fn7vnP=nnaO~-0BVKrV*QT>5~5X5g;sHX^{BmU*rS*n{=^a zy(oEDq(gkJNH?PuhwY2Pbxk_>cxQW&Q}orxG3_DNq~oIdG0}vNyeKgT&O5S>YxqDb zMO|Znn`YD|K03Ax2A7)y|qe=P#z^y9u8Ny9*prApLzWq9ipQ#YF z8F9VCV|jZWXMKa@%GHQH$SWcjI}Owr;&z|b(Vw{>2#w8F!qc!qEB#RM0s#EmVTlpm zXeBi6g`IvP-Dbo-A7dXQFAU%h(oL%;(j}aawuZuaZ^UUuldkYOil4_~Y)+~kBtduF z2moG@j#dXBXw`L_N(=zQAM3@aQpTlFow1mf&X1x9u!Rhsxr?rXKRR}U*F7}I08U=U3l~U$398)#R#n-SNK~{6F!_voz^Z$jBq^1j2{6I=d%!C4(W)9* zivfny_JCD9G#MY1Cum^!Y!6suO`+ZwhN6s2vz$+0`22n+dZreI;_1DDGDq_NXyx1y z^)Kq0vQ4unRFxJZVM5ba^7)D8>~0KjDvzRnJ>y+_oy^brO2!F&#TJ7zvJWwE@UOmG zQC%C}kh4sH+|>5)9wS>mV?HQw-P1n76K?;avmhhmioYo5l+n4ReSr7beG)(GZuJJU zfPZjcES#^C__^#3R4`U5sYXZ8%*a$M#uYO#z-^|$`RWLv=ofE1<+7A~W0C(>Am(t$ 
zk!zB?eyqpkTYPi(;oQR;!`6Six77hC!>fG$B(}T>&qQ3|k--Q^;9qUZEB-&i$_}Ol zW|ma)BP2^BQz8I^=gFS7*_#DD^hl1fx4OvdQf@QNFOOghF_*mwLx)rnoXQDn$y^Q} zhntbZK-g!1b%(>rCYQ9p?c@RTfL|QkneaG5ZUBZE2||P#w7^PrW1QzMMyHA&__k_f zb;jn_DJrNK9aW1z9%2iZ(VbdE0@RXb(7!fD9s;@T<7Zd4`aV^ zceos>y$x)LprOpWZz_Aqdrwp)p}8;|A0Ab;l7*yGqvT7&<(Vsw2|I3L8NEa7ZSSF;BA+kFP z6-Ck9AIV6TY%wn%t`y_J+D9V!=c0r?U)m{Cq=YL3;$N6I!1X6ZQ)%U^U{MTCbIlB} zlIGC2 zO8(6*sT$gFnKN2f)MkiB`aI)c1=!Ii1hCXN%cD*_)uXxlk<1}cwXE7Ve7b!oF-0gO z06VWuBg_xUbc3`zC89(+#kT1FY7K2d70!8#i3x0!C_#1kUY4GSA2ULKRuj zxv>)`xJ-kPF}`B2zP&%h&zO2CwL_?vYIXaBcz^RdH4gmuFnm13&muk}aUd%Pb~UC8 zmOHXucOvmKJ3ysPFu3V_QM_f2acNvgwuj0eT`4xALkFk9gV$oEev?K<%mi}KlvUM0 z=Kr%-fL#8dEA`gj9YRNe8i2DI=}Zk@TzQN7N)TzO1yTS?+U6>!S&{7YQfW~#p%TT(uXN4GX$g_XlSptVbPlo-6MiH(5KT~i?$F>QDZpC)&_iTgSPhO4C0N9A zCVnB}5dl8Yd_Xm$U}S!3qu?>1O4Kv8S>w${#|QYO4Tx5&ZD`OFEOUgu-M^s>40y1w2}gli5@Xjki&q*dg9x2 z+U=Q+F6wAd_W(830%sT*(;uoV$wMqLhal}&6iWLo)e-+WXrPRsxd8bfS@C=zWS=n)OqY}J0KDio#fNz*gfM{&q03W;^0-uU2s0RS+>r&Hr#D_m+ofU2##W~s9Zwg{c zNxqq`pu%a&jbhe38GDBe<1XT@6~Lql;+FKm`p zBYWdGY)brvG(Z!E*;{c=h#fo)aHw3&ex~mZny0kMt{0Ok#c&tEMGc6B1RLv@N0|Y2 zVro&y3<4MhmH-UJQQzu85Iqv?KD{InEp63713nf1QOED?@n2utjrI(HYob(fG%r>nw$9}{b*ZA$?s#^PuHPsp;G<nWfSdCSoO8cqa5IRbo( zuinv8k%GIB$+Itf8C})-P=6JW!N}y4CE(d{SB#EfZkiMV>B7mQlxT(JaLX-fP|%&l z1xGIL;L9H2rzSg}yu;4mdyX33pqFwsAbbR#-`NvN|9poU2iiij*>XO&Q4aMExj`N$a)BcdK+ zxvS?4;i3z&L4*_0iskuTbpaF_2UWj zfOmAcLOW%wDFS@`cxwIr(8doMD3WOpl~5yA6l#|Im|WDAnNS%oSHz)~bIuX51EcExl4 zktu_k2TtQ=#OUA9l#RxJuozeScl?z^rva=b)JF2TU#I`oMl7{rzz~+ORm}7N14~$9 zP7rMZ{T}8JJ0>8O$}&O&m4uBj?-g)ISym_|hq#MEb7!UzDF@RBnCcdiI8x#qp@Smz zuZbnSgF^`5)x^F$J;kzP2>epE3dR2{z@!!FbZX0_rtyh+5~;s2N)0q&k9$8kL*nID*C3c)0m)xh~e5wN!4}2 zyVR{{Q!M8id60U=y6rZvE+|~n_>;D(WYh}{NM@5Imt+EOhGfx3Rq{`}lF+BcVR(S(3QCD;-@ofF z&ST+;c2d2hgx6+Q{31gm5%f2<7fxVKZ|0j!k=IK~I3Ll4k)Ifs_(=~fol1)hxI-#l z;tjZ!+O%SCemlG5D2b*z-Z|@0SCiB9dWkovJiyziFV2qe-CjrG8MQ&?HpQpL6iCYf zR*;m0j3dj?efwxwl!SAD5wjXc*AT6lYJ6I!Lw8X|%5d3XI5f_u(o@g|E+as8G}=q2 
zBRcT_yexxra4?gg!Eib@ojc;-RhLSxr<5Py@EjK$96+d?m<2G6r$W~2}5sABAd$PK}Sh|`)j#~Yf~-gyxx~K zFK`e8+_*W+eg~a2dr`(du)*y&bP_5{c{_knapW12Dv^Zj^#SIfZuC4e*=) zF}lV@+1AhF_Y**2t!dYJNi7}&OVJh9R0%ln35q)!c>dcDB67}X_R(#hx_|=ADxJY(_c3s;8;wku`a`@tmt|J$=PcZx z!W#b25cHZzS(vBF(CK4VT!m`1*gSZkR5#7FP%WSlg0rrfPjM&-FR9fDeGbs7aw~5MMB+*gxz_PHa#74m47`?BHyE-p+pD?djfg3wz3B0UC^SvB3}CcKfE)gkx)eTJo*Q z_KlwRO?j01TSMgxeICruA7opzRQy=Z3gg-OU(epFHt>eQdQrsBB7OZ2p%+G-AB=2Q zn&!OR;K?1jS+j>o>#8T#JpK87x#8RnXQqdhLB|-o560<(5c1dB5y&6}?fk!fW2zrM zD|HYtbK0x~*!)aM9o z@C}c1yBs4kke;U|vSeLtf_@CKNLUQFy3%7+E{a7lRSjB;F;=o3`-)&x;-XJjM!9iH z`G^p%A|_05Lk`}OA^LRpZsrX0^}+ynq<#?(=zo09pHM^mT)Y4p3pWVJ_YI5OriS?0 z5|l$(1mj;Iz*`_(p~70`m{2j#ybd7b({`G?0smT|VYvt^avd_RSO1Qd#2ZkKc%0&( zxmIDf6B^ogirn>t79gRg<2Vh|xTF-$;L0el72`u2vTGx1^tn5K6aOTmzo;P~y|e^k zy#Zaw@1!#mSB{iou>%aFljEvPcgDkSOet{ z63a3SqX!M~v-wV6s3ZmNf-=}JJ#|0UXNnAn8kx{TneDK4LQ8%E$xmj_PnIAfi=X|G zm%-ah*E9h!4nG0-$uU7-hd=TW$5$N1`FD*?0<)|^8Cm>npdcag_}|*XNM;@SEjNy( zje<2B7GXui7{r4NBBUqI4jqhSeqJ49nkc}!@K_k2$z%^6#m|zTkWGk0$KbQ5vHV7* zp5QEg)`r(lbn?!&$l3%TsWugfN(dNK>g0D2d;%9Bl1{e?g@R&t0ixOp9Z_`x7M%Hrm z5T6{N&~pfb9GcphBA7Rk@m*AS?M!k&NptK8il5DQ%$qMc^%#$F>kX_d6OW+>*Q!R| z0kZPOF8P9a6VCkPDr*caH#|8_%!}>a&cxY+SwZ{F7`cvvr-WAhuOHNVQ(yL}o41Yy z76ThUcQXH!y`OL<{UoNYJpyhuLrn{E(D?!weA^^<@)J9ST3G1wV|vX%Ohy(I;^qM| zI|>Az#wf0e(ZnWE^bqcx+EUVJ$)XLQ$BBFC30gkGqBSkO0iT9`QsJ4OMyZcBl|&|H zL|@#HY5c^N!B1n1$4|e%n$B8n(Scp=p9aOprca*w`QY=!ff3XOFdrG{K?EpR=WK0? 
zqt``zX^VKLXj#@WkEummw$3jf?n*4eL?5(C*(|Ps05hWZ8uA_#mYvd)IyR`M4BO!b z+%ilE!tm28sv>jxojL)vt=z!cA7O)K)vZA3Hlg23>>f#13b6QD3E!G~fG~56-w#Lh z-6Uqoa-lySi7zw-|9-@h*`MPR&d8F_+#~=38I?ZNPb~^T!ukn5I6eX_e&!=hg~E&a zsj^7?6br~Qs-HQ#U2y}8t;navT>bHd%wQ zBeEv4PA#3tEd{e9*_GfYYuENqA8o44^Upu!)A^D`z5eth@T##J6z+5SLM+KbTxGHF zz;GA5^9^R*>@(cxSgIX*bAlr~lJcyRn1bjCWo$PrDEvTZfgWE_N@Jp@%nJG%?kfD=L{1Nsh#>$awsWoj zSsQKOhXGCIONENhNF{jT={Dzu;Xy|;M#La>Tab#Q72{kcxL_zPp^;(>VN*|~jHlb2 zhiJj@6RAWxJXJCo>*kDMkdg)*q*`FR%{jKH=YrvyY)Eq`5^AXJ=)my)b>5OqNTo;} zmLq_@tiO=WNcG4h3*rGkOc-Lu`dTBEWWuN0oKIxKAu^C6{3S?bH=JcplTV@+_LFR) zR1B6x8v^{nZ7RPu)>55&iW)cy`TQ4dQ%6A|)dml@IoECg9lAm8ujN3)>i1t{31ueU&2Sz{X)&RAq|665U|WV<3B2U3Ydd|p&>^)IrCQt^mlR0dcUubB@5hPO@zyIUb2bd%Df} zjG<}_Z?`~iE4Mk%f$-}VIA(XF|4M1vN)C!KPHP!&`O|IA=Ocko|AlO4xfVex*EqL1 z_bK&<0r`w$WEpTN!FS+41=Z`eV{ye7$tpcgGs=Cd2ol`$VWqp!Olxp5^n{&?5 zU&$s)C8&f*Pe`{GxX;H-Hc=`W_)oVvABQ_9ke?;G%uy2F#4a&$HxM zBZaHym{xD9o**H!D>;-s>=Ql7u}`--N8nzre@KqrgKl%af4pZ4VBctc!)?x&BY}#= z6Sp;kr8DT0vd#z@wS>gSG01k|5;kptmi#yIvlNj1bE=M}){)%i{1<_%Ht$9%t*oiP z%gCSHraqO86pR;z%5Bams5D{tiBx*IJyrOBM=OfJb%Y9;Mp3BT=G@I?)rP^{h7MYgLUmuaWBOh1;BK{;8Hbt>w?Hw?Ny)@2>E^!tfKR^y+-N&G}YU zTd(Dbq6H;#-R69a;er$)W@fP7gKl%~I(}cbopUNjQxsQ1U)WZE3f#lFt$la#^Q5XB z_H>R!2M~P^aFLBf`mqIGM8GXu;GF9cc|HGD)%vu$OW*ia;2!Ek@3Wh;$GiLEK1Bu4 z1*z~e>3EEc1!KYRQ&mevi6_c1r2AEJ)LJHm=K0WlFsd5uy7t$yy?4~e2Aa7s+!Vmdtd$#DAQ{EL-Kiz;U`i_ zNP3d+2+sdd)duM#pSc@P7=9pi?;HI?{JdaD?j@-%AL1Y}3{pzM2~vG_ zV{q09`?*QyeVI7c=d<|vcgZ3Bv3=Ulx)1KDYD&niwQNm>>x9t2{q%9XFNfz>xK0gl zysa4f*DcVyn}blhyCsK|$xr<;HAd;l4X{8v^~XMdxPyT6hrm^~d2azlP1Mo$6hCvJ-sQGtu!N_PivjRoWD}*D`AHs^j`X+W zNU5EtYG`ART`;7mFG&V#$tngJM<7TDI+z!Vi#cZTFj1 zH7Zz;r=)bLMeIFzM zkDZE+^tbn>}D;a2({;#G9CcPszxa1mfM=a`YKHTGpzBas)jWq4i+jf ztW-5rfKQ~7C;EJgpF_4uE225gv<14@DDj>DR@G9hmU1Nk&t!uI7jZ@~hmh(U{agIJ zCva6ip(RJ3DMWfOGF4%JBbzAIR}Zq;w5@)V9EDVeYjO5@r$}-H!%w7IRpas`RgG>$ z14D9*HdeJ1nt#5<&!1Y+`V?pf_}lV1#>jb-Du=k&vI7m?;^(baM6aR=DLfas!=e;H zi;Zk+ZfgchohhV?aQ^k2s-r2zWmIgC`9Zb?!w;k|_mt!(?_^fhh_&5lxS~}1v@y&w 
za=BT4-2!bkB;{pCRofty?5ij7zMD-tTh(p{AD+o_BUTvTf2nGzx$z{Bk&*utxX;Q( zBfJp_z_uC-TVzA=fdx~CO(~gLw<lMdNRNUme#DPhhseo2)7m zW3RL7Aq|Cf$Rc@Z2?>59VbdxvmruJ_MK%vGCLy^|B}h zqL(Eha6g<;Eb_EQ*e^cl@o;u3!I4@wk`l^HpyrGw4h5lzktV^+tGr?Hgs>8QAT~fD zTu4!~e-m~>00G^RXwugiG;$2-ogR!SVk+*8FKqBBt5qv!_D(Z|y z3_%bO%xJw+mu%eZ)RD;lqv>zHc3S_ucbhjkTzV0yBQe&vCPq%7H_9VikBN#ia<%nH zLY{e^`XZgt`9ni;4#H`TsT1sFg;9E06Rz|Ac}!?5YrI-k0^s2#x>o#98+Ww)oq7Jh z;(05OQr#X8lprZu>l#KUiiMphh;PnBX%@#bq8>oF32WMZvD(0?KOuk9F*U&4AP@s* zT=^8>`h4J~2fBo_yz(UJoXCke5tVmR-n`}NiHsI%;=nhghLx;x!!-^hE#`@A8A3a% zW)|TqOX-vN-d<=2;)fH*r~qB|Xay+LzG~H8O2={8qm!~(rUDN-ce=PiZgI}nkyRoM zb7q!-C$xf0JGsLzHG?8qRmDqOaUkLDjh#@!k#CL0k~x=>f)u|f)wHw3sRLy zdz6$mRd?h;;Hmx$_0x{_<&I1e63a^^^_H!wlVKFw5*Bq#C{C4Zru+0*5b26hboQyO zv><}(VN4B90I5h?-*Iy2HHM!^#Tf%-8F`Iikb)7Y^kO>apnfTD9zr`;byc_u);FMv zTE$N4Ubo6CUjvc@#w`#>8Xk1Lpbl#H1Y|S8GU_Bn%ov;DOpjoaZYVIMdxs3VVNqo` zB3iW!ltrP=L{@TIY}!WE>F`q0EvJJvj~3emRCOciBW6(1DE1DglqvgqbB;sn?0Cr! z7sJ6-pnhfCF6sbn+$if;vO(r~3zJDz@fOmKsE{X>qLo^Cb3z!KT#uYotZL9$9bGdK zr?p5X$PPvP0x_NhZ1!RRsPDR3t#bxQ4`XAigc7Ooavw+oFi zOIlW(R@UX94VpDuRtsgV&f?cog!@MYpb(?8p_Cnjp1jWxa zqPnns0GcZC&A+I-0V8=|9nf<4KU&U-=AitfX+llBHzsWe7#>jo zNsV{&?|elVdQYOUXeGjqM8=NVD_UkMoO5qdIAt}9i8Gu-2;gEv0Z|VFuljbEblt1F z`ux?zzOqXQ!a;Lf1Qy@cuI8`@SdvT2ECJM3>B`sy?qW*@ElR3H#Hg$WqTa)2iI#Sb|eF* zI(QQOTFd;VTMfG!DolL*DwIHkGf$naQik_U5yCo+JQaa~Cnd!*vy9&;)L%2iVJm!T~eggy@+SYdIeQ#c`cTNGCnFgtIltRW_IIW!ingm5}b z{7_>J8Ku%_6l|$NXzw=(kIuSc{yDKl(#L*W?vi0QBRK(X(m+^rZ%CssCZ*%Tj|_eaUD*`?pDBBH)@6=Yla!?lD{46`6p23T zOBh@fFbY2i$1y#bxuDFUFR9`y?Z0`XiMFOmf~>&^;9m$-G=a9xuxHOw)J7=i+Dlc!{sYC{2 zSGXEsJ{;pDcH-%RX>^uh17Se`=Wk>fH55lhQkPlxbZCzT=$T5NIvNFz;!u?n+X&<` z>r`4`Y?5Bc2xRp*sR4`p6$@KC#wg7gydQqUH;#V%CngTO^Qs2!Y&IwLz{G(1`p?AU3YYLK5R%cCb*Dfosgi$0g4aLQq zz7@#ULC)!X3y*2xq~_Yv%_(t2#-H|<4l8~l82QOtFQ->6=+B%zI5++I>AY7~d zv^vQtL&;AX(wK0slbUP8BO<82AfAk$N-e#~O2TQ$ITxSr>%=*&RY5fH^Db|rO3G3n z;ai_TA^mKe<=3HulD?)ZH$EK%$N??bKm9ol;o7x)Z8V632VY`JGuYTDAKAdf|450eifm@5eNl*6TSq 
z$!22I)f-&Go;M`b9@_)J{W`tjs$Jk)F}c#bLD`?jMnj2m>+x>Xxf*kPR9$Wa`j1|mS}nku_XmM?2p%hkOr&f;hG+KLybFbj5B9Da|Q zNu3XXPtO@+9xRC=3?`dbnZb-x{HyvdzR%EOKSNPwnVU(9}l2R1hYtJkSozSor zfQUfwK`L1WsX^~1o3y;9e}k;K$O-otfJ6ePBBsFk7X=Fw!UTUH$M`tHP}{z%bMYxk z0=wj%8y7MVU@a-TSqcfFzHpn-xzwc4DgZ9rEK($LGPb#drodPsi(KU90jyKReLXpY zVS5UeW1gja94VlrXySj6nE_*vtL?UK^f|Te5a~hCv@w5{{pdWi`!#%i)d7p7EL%)Y%a&x|=c*m2bSTN}hUeH>Riz&XGT?B+?lg*A!V8 z=_7RzkIxb(!!ZmnddZMJlvUnHpDVYe;LJ^(&Dh*~BsBW-(Y91kdz%SPQp|#jYC&8> zARL<g9FncW2>>wk)I9`yAdM%WEm7$BI>p0cTO1`> z!K*IF=3&e-dTkaOVJT9=G2Cj~e0WtMg_(C%34A7`-jL!$udCc*l!`mpV#`Zikws^d zey7&GcrO**URSxrYGXFHH%ktGgeGtdhIxCAZ-{ zaYf-j&=5b1@!?f;bS~d|sK`bo4%|?rTBj1m3-ud*#u#~}xQY zAt+L{JeY8u*H!#1-9qMxk_+`9`X`rRvn&i2G{n!wu1_M$LHaCVf_?a;!7WAcgma%n z3Nn@6UWbe+!UMFg&dB2DV*&L^Bp^P{4PS1Q0N=#Vs>+F=rGQ%rS}~8jP?kn8s!bqh zmgJAK*3i{2Qev#6dW$|}&uNkBN(hp1CK~*koR%gI@{lDZuTXw7mDfG0xGn@5h1=Og zuu9bNVrHx4+BEPL0SZW+6oHg20IYHk;IhJvirr1R4&^bxl@jgJEo#2R0szdbEcCb) zsfrh7o|-Xk58i<7t|!O|r?2VZ$9bl*pbzFTkia>@=sAfHi~v)(n8*1TN4+`VepTrL zVg2Ge_%NhpIm5L9F}2zJ4_4O$z-1`JW>{2EKXsq&F2t1cCpGO1)cjO zz;#S%8lTIomFZE`vi`L5cH$ z#+z}uKe80H@3<4;WGWhC(UKlv#(_-A50|BfpB!NPgpn@YC;UxITF8bmhcB=}@biRK zfvJ*wZSx#T5_>x%>r6pAJD@S?&UofaR47y=bidG?TrPmo3}~jv5)qo3FqR(~)?!66 z(jq37ykc({h)wh$M^;Zq=CC|;IOvVIzMQ5Pmr}KBIa$7`cw*MwyI*AY1fU|oj(`%` zwVbrH%&vuCDt3fOtmA|0uz=;qLY`9(Qo1@Mp+xA!HlyUMLvR7uXOZFxhtmB=!vz`P zv`@t%$jVs$$$06hP)O6akaBY&CSd5?OR^U9+0=fH3I&JH4feqKM zZs`H%i34CXX-BBoI&>|sVLVO@RBgN%5pNW~_3p=X$M`%t#-G%z7}hC#2QVzyI@j*_ z9ehP~Jcz+Xs6^#Fz9pmt25r2HbQG&F8rXP(Miql)<4}OXKeGNVt_!hZjPVQ(uinh*tyVUmrYuMYO)&KHl>_XA)Ef%-Id0XH6)-2;e36vv3ifG{J_(1LQvH@FU>89XLC z`o00|qnPHp;js_q86(RxICB!z|6raJsX?y**lQF{+GCzvU)X5?&z!W!JnfRBIh?7n za1xxrPF*oiiVnj~!g=DPJ?43d6$ziR!EffIW}Y-#SN`T;p6<`EFi(szw;XL8H7n+s zY8{JtKuuCD67sIuhMF(LH_q>WNX(w?wKzWb(wHtq7Z2zzcy3Byz3a4#wW&5TvI#H#Gv z;?8kd;D3`wjCq)?XH=D7(@!5a+M4R7ua1hB;<6_d2V3EIU5*OrL@`8@O|d>Ir0H6% zrJ<;lThyUbDuhrP8{kh3Xsz@lNE(I2INRN^!dXklr{otFkd#Wz#ro{l(GYiVY5(btz-nbZ?b{ ztH05$*ITB$MAMn5(1@xRG%WSTf!9rn$VOo;%) 
z-O;J0lMp{?t5K;_`_}G{?5mqG`)5|8c!c`cIln0$x$wN&_q4bqlzWKFSYY1&sgl@{ z=X;y&?W|;yzNs(`KbKC$Bm3g=%eG~tkobA%RIC^~i$Ig)ttoT6_6GZkgQ%}Py2&hM z{gWSvZVd2Zo?$E=|0wHUN;#>yex2u9^w@%mR#tv{bbnSHp)`qkHUOt#H@tJnR9!tGzYaz1VJ0ET>dz#( zj^pNyS(d$lT}$U_-9OH{f1GKXBnzOd;jKow*k4)0y`8Fo+AwH3;q={UD^K-HL|n=( zF8@>AJ&5Z0qNA3_#?n3}cmdc^2+MX~ZBrRBx`7%cf7o>x7Rv;{_(Lp|qLJ3;S|e z=>#3jU+ZRG+tj_UF5D&R48ocTvvMsTThb;0vj3)AdTgqce-h5KNuX+QuC`?oGYNbE zxDA4;oO&c=1={L%AUSb>uMw<4!`H$B;09fEg~Fs1QIPFTGfTY8SwC7>F|x)hCPK1X z_cqgGVyGjb0}^P`X)|e$ghz#t5FCSwE!VLNN*rfFL)XGOgIS_tBi5y@K$7x=YvuUH z@yVLlzK65GlTvYi@dlcO%>)$pjQRboYhi6l&z?vJW`G|SIX(ncd#JIl!^`mxPC~GB zlL)J!pXgdxx=B%cPgr%6?jM&2ACjwBeJiXwq=ep6-Y1b<;%C!XcFs0F98$ZIupNMZ z6?_WZ;_+?*L(`vJlC@KMtQayZ_$2G37~lmUf{_*aO5X9?$+LBueM+2{!+Y6(wMHxv zM!Y^s$2D^!aXORG$+NS>d5yMoye|?b&KIJX82MikCk?*ePM$5sXpsTwHxlO+Maaps zi^#Ag^h4sYX52Y3-27zJggliOqr@k(WQB7^}(aFEr4p@2xp zMRY`3ki67E*~D8yIh|-9qGL#lW2*SRZT3Epo^`|87!`-lS&?l7=V|n{M;E@%UwRG6pH&HYC%F0rw-pZ!$tN=EjYJJ zt)L9wsb7zG6JV2zjg6j%Cy+@g(}x;ZU+U!Hm?lfyAB=sH1T;oayQ;o^P5PML7V`t4 z^BUO!06Km9|^$AN%>@p=fOMj$Z6C*FwRGTW7Rv7Gq7EN}I*S|vL#U_(N zXh+9<%}+QT<1%vj>u6rYCTu-73WbhTq(hB-ku7h1zJ4C532}B)4{t-1&JFWfdFR#G zirPP_h)IbGz24m_TGx}|J-S}v1&JL1SWc-uy^%veXYoDJ9z^10WWJEkX6TLcKAUL5HwmHXI?OnPi@5g({6R~1d`QK?~y_glD;zM_Ub1a-ol?MWdJxiQ<dA~jRAe|dH(pNZSWr0y6!@#7l9*7l zT*aF%5nue4I|3n58hO6g5CMrK$oXB$_0u~^VGRHauaww9k0H{0#)~2&125mB5!1af zE>3duQRuFSUT307hi6{lj5dj3|GFq?Q?E+-T68_>NfQYam{OaTh#lOxvYDku$c&h_kO!-`=CFeO6p9VPMiMS_>c%R1wX~s)xn-ttcYXfaISkONi>}6T4% zw50a`5>-T!jv@{EZXiX8lMx4d?Z>k`YSY%5lsi3CyD674l>@5@G8!mZ>sizr-K)^F z6881uB0B9|UsR>q={-P;co)&(OdO+?;#Fr(>sr;tgSAo-omQ5*sqvSrhHyP$v?9qu zB3&0Kgk=(SvfG!ehKqVkc$dF!bwu**KyGo7=u1|^nlFYUC1}kAi~BTsJ8uTqZfr)d zZl!Q1o7Wa9r;RUY0ladPj)!2x>{W|0$zG*kaW=D8i3e**q--wjbqERt_E#i@u70k{ zI{#xuJU~pgbP5LeUn>%Ii8x7s$BO)d&M7QMjSom914irRiX|GlbTLSj`5)qgMWUU_ zNkhp1F1rC4C`|xxRO?2k!&6Cfhg7N~^Ef@Ock)1PCq%16FF!e!_{n9+Yfc)O4ig*| zs^CTPD~G3Q98eO5bWk)Zj3i|HCFYFOcz_s{!(ByE2=B$5aiVt(_({=oPIt6nI{(5= 
zj5R#~=KTO!Rs@z>)?<^S5mNy(4qbcrNjsyn>`LH@(FF9aBN`GP&6$g<89}FqF*qt)%i_Cxvl|<%oK9O=g~c~+{{=m1&l28Pj%z{ zBDvB=CYG?Y(tEBNM4bx_rorZy0cf5W#SJP+DjvPVTdHs-hEfm>m$;M>(knf%xaZ&; zxYc;nL8$RE1U^l>rC^)4yW)wy{AWd-JBysubj)VK-iXbZ+lcCAGw4GOR!(h;0{q+b zrU(P(qD~+n%oSjd$Jk^BK^cq#{*Ov4T$$7w%GJJbJi?7y48E8C;0SBxEQV6qeY^sW zvdv=ORh|Pn+(d^YBd`bRbmkj~jRQJtba#0UBJ~jxJTfT!;5P~xxK8O;JQ!F+7+ES7 z4fzNVq_7WDB-u4oq39lu#JY-&QaIxPQ9E%eY4RU;1dN@m9=asFh;t!|3^x`5!DDAONYm2cc@5V+ zwDwTKIclF+E8m>sJj?wsKNHSH7c=&UDXU;ra-K1*hIt3D_!+D~q;b_gUn3Mvg#wSl z`Nq{q*b!rWc##6Leg`EiGC}ckUI>XShg7A2<)iAQs_7He=~Q-=36S~d#m7YN)SSs* z^alu9~X4yixU%ee1p;n8RxusCOaogfEFfru@L8b(H`i~~JnC=JO^@_<+k z-Ac%i54zeUt=-^{oj~un{=IyJ@&s6$gzc6)+&*}^J92^ntd}hj|4%v66%p=;WgRzh z)Z_LCn7CwbHe;BORx)q80{nS*M2EiKzN4~2BD$z)tdjMl(a1(p1!xJ@v1ZYMb6D#t^&1P9RS?w1K-ZHgkw zLt+?2zH2h=t{X~;#1IE(aRy!c;A6Z!Bx^}tQXi4>CEt`UAqj>hVPVmYQiF)=D8R^C z=P{|kogU-#CN2=u2mV3=7%AEzBq*M%J1pUk6fgNE3+n|XVaRfx9{2~ZeemS{8?B0{ zZM_Vw;z^-7^i}Cl=h&(-ElY3EfpO;wVL&`pFwKLzA#2BVlUiVgy(tsc<7O6e%z)W zdAv6FK23RfC~h9EFT%Md$YPC$M)aK?N>0atu$7t4o*OulmJV9wama)kCIQ8`?t`c9V*9I0xW5v z3NRvo2BNwLNErB_zHkviH${Kr^TeiNBh$z#`AghXX3tg5GAW*E+yFUz$--`a(51KhjYuCrs#3&G`5TdDBnlsS1!qK>RYMVK)I^=7-95lwr{5{k zw@*%684dnMq{SEQaUtE{+)$*CpdgY)JorHYMr$lYzV??UX&7W!2#skA?JZ3oZQtVO zc?$)RKGuv#FE_=Y!CB=ocM$0#`VsL(EovLL>K-TUfau}g>Dq~?HK(|aX&??T;ey$D zxL*qqGe1#bnros_DB~(Ux`J);A^*u!l%O*OWI_yBqqJ zEZRXNdK`iqV2XYYh8H3&0-CiF3Ur`au5g}Z(I+B@&{>{@?C#$YiK`NKB*5}Etq~~^ z(3`D=xfDrM%D)k@PRs75vNC&cE1fqKNnkS93xFkyRKcP}fG8pn(7V1xE(J?r(GTgo zrpW&oyR*c`aUBYvTPj!L=`-X0m(=Hipe8$&_IsVOC5i$;5Ck#K8I-#8={B(gq;sIh}zPIUJ(xvAx z@3yu!fANi&Fu!0<3(Q$i4YO10LP(+K1$80+hmGx9TBWhkQ)@m&-42Y7)0RwQ1>>-> ziRA4$?C7D>CG@;O-BUn1lMLlb8vlD^oArQ_n?vI1)fGK2 zi~czv^YC~ndj4gHoy5;b19kfyme{@rq*L*nuAyg&i6?+;)_LRk8hUO8Wj406D)g3C zmRB)@ggeuBj{#mo&$atiZY0*pc|tCIbV5{|j%Uv$KmUwp`{G~P&hEM7=N5en&)J-J zc$N%e7^F|v&~pzDhoBpX_W;$cO=AIM>W;6W=YL8v0O^3z(>3&bz%!EF19EOIDjl&U zIPv@gAd`#A{u|-s-+)a0;5GF8APH1DthvLy@@8z*g`VXK`X?Y$`nram$2|Qk+qI{L zW)Q;fg0yzx&!+Rt2)IXy2z+A~1d 
z1LhjMg`QW+Rh{h#ARQTTx`m#7pf3Fv7n)X_D?5=jzNFJR-9pbCccV+IByhOXRerfe zqZSpwl#v+c^eZ5$sh39WbEj}B4Mj5^=1Sqr!}xwQ4MB z+W~S+USO3Hr5~CK(iJhW(EJ1G*k zB#mben}?fY?GbXmR=PXPmiwe16h z9&cg;M=7zZmqm@&!?%>HB)6nzYq+>%Mk^_dC|r_Wf`>%Ayz)4YfmlFa>xe2_`05hT zW~A7PV;W;riWfw=2E+hu+r-t$-ELL`7R>2&HZ|;Vd59r-$-?S6E>Ky3F%Z7+T;na< zTWrj5SS@&yWXaJl8A>oj)fwpM0U#)K#^K9jU5)K%A3|!lOE6oq06~*rMkZ~mkUzkZ| znk;eBq7xN0ReRwP-C@9~^TMgf`5yJwJANrM7oh5dEH z?lBd2x^awaCVz}SCVg~&guZ;z`>d z`buyCSJDqetVe4c!3p`5z4a!{o{ySSOlLPz21{c+UccBFpUXJbE*>;-UUiV1+Je+J zjFE(c_^UMH--m+@nq*SXl2N5WWB*E3%PFPLnqLVn zz|4}?CcS@oE&mqZXlgxNn_MR?PNS^o)Lt@~%3L_`#!w<6Mgk7a`P9gt9jsF0Cb)op zmb-}X3F_)vZtSygXPwJ+xp37)$L~hpN(4R^K{QlwVuN;2-D~tt32IzQxW%Pg$WC6U zOX?I{uNE)(!rQq>-Lj=5g0NpNH&d9pp8O|kt_jXByDA@}S8R?5Qs-1YLUI2=Lk^$5 zvj2u-44Wv*xucvnlrsYQgJO=z(G7d<)Qd+ z57kjEGtGY(pfIC6)UjKoZXN?%4k9b86TDXb0YSgXy|}z~5n><0;Sagd_vP^RgkU+jX#Ms?6Hb4+Cei$HM(t4;>N1@i9!9}=VVVwi)l=CPq{dVD* z%;0M5ULWe?R8id{s66ae%!BD-lkxVNDxgu_!8su%_l2^W@Jc+wlVeHZ&&Cmc+(>hFJZFcW*4$ z%BB?SBd}_WmBwQj>sw6davvdtR`7SBgOQwQXQ#FIjlm6YVzz2|ixpkZs}}hOaK;8e z@27E__h~VI0B4*w>`NL?*mmHYS8zH59;LpvVXu2j)*hTL6hEAukEl9=(^|Loi8d8m zRPDe~L8YZ7-Uh(fqH1f;nv>P>da`Du@eeaM!D-`6?S^EE=QB6KX+4vf)-(D@(B*FC zR;$)ZNEOR8VfzW3mhdPs^*mww37mFUt^>f{;4cDUi=7x~I?F3%pq`76%Dlawxj9*_ z-ITaSj45 z_mm{$W~&kGuUuIRb^}fuyL%suz_u8{egdb1lys_b*#Um$8s}D) z$ZuAS4+aT=emii+;aS9o#{cFTIcE+onu0x07e=t6>WH0l3n!`TgOG}AET~#cgk;5l zbDh`-Jx7#6Oi=ZVYn)!^o{|V@t^`|t{|L^8gnpztJ(W0c>aYTA|C zCunXHQ91ldNRyru8TTt8&3!PWu^}W!$R?!iQn>fl76lgE^}4V&wj1ZPi2vxHwLsQt?~q?Bh9F8G;Y{762=}olSHd3{iB4j$4-Y& z*{A|WJajgwI$%fG)mT;)#6Mvh;0%NLw2=opQ<6z2_#dq6>CDZ^>YxwQIR^O8ZS&m1 zNeWg@M}ot)x$FgkJsRfF^KXRIUd-dPG+{Hqr{J`q!|6Fa?+NL!ZT_V?9AG%7YCOzS zKN3>czNJw}<6j7=&Baz)HGU$0vE|Rn>LjDFeNRGKEUFLS%-u@(T(PfzZkt_x#F7aZ z|0X15ks2FC&rf*8!X92v%Exx&@Mps;nYoV8Wwo!pb>vtRr!r;!N(Q&tVdrWY*4iiy z-$m&)%Pm#V!dS}a*rw7AFPV}|f~SuB`Oz@gR2RjaPd11tx9mif0;g4ziTlEG8@3<8 z={1;Rz)$>n0jCw-b)aAQb2L{wa0Usyw-_HYKsNdvs*d1v=7I_<_-&&X@;YWP(ZL!Y 
zHhKpCEjWLArBu?V2Rr)#JMBI!;<8Yez58+I=4RT4=s%axbMv_cRm(=7uyWYx&_^0V zjeDcFB^<2n4bGHgqOV#v`X31C-XcUwYW+$`Q<6!3Hy@GDyP4bD+&g7@<#PR%kh;po zpG(Ql&n3nMJ8im9q;P>b(971e17}#?Cr3)w>|Y6~vRa_HR4w|Ikmd%IIeA7%0*B&KmqTA$3xnKb`inB*q0*8$uFrMPh9BRe8xmNL!hRB@c?Q zu$b8>bSEJty+k(p;TO;*{|{B&ceeayGO3X&o6bS8!U15T;ozV&)?jiREhfPy50wh7 zT^CEGTErj2{xDoJfHUmJbeYRjJwb@;JeDtIonX+#~N&x;gyw*%bh(^gx4r5$^{-U41jSFV0!`~85GDc4nD!j(k1IW04|+VZW3=SnZgYzkq zjWms&Ws-^l7)&%(`H@Y^0GRAGeADnvL)NYHWCbOuO;bq7o-sDj_UE#uMEWIMboRe8 z&{%hJm`0upgG|mkZ;FvB4@K6SYtZ$egY6{BxGM*(4G814jvg&t6t5V+QRE}Wl67Z1H5^fh*^?m@Hl^aRCd)B zN5R01V5-;JuN&6)bBD-hJjRJy)+b_hs5Y)?U+5=ZpK3@`Pl? zNo$Y2rlZ&4b})=f?9^+om0@a6OqJl8tPvNhcon1I=e>?d$-&HR`&r$MfeO{%_nMC8 z0U?=cek9Q)l(OaLM7<9Z{oZ3>JLt~LP~IO;Npy*|42kx#N-=bXuAh4?673^iym}r; zRPf`1N+u~5@5V;x`Pm}fqH?NP5-cbjdT^h77lO9-s*rj*YQLq*(wN2-=%PYI|S|r*Bgyft0l|6TeOo(Ey%IS{1FopxMx}Ns`WALYj%;;q|A= z#A%|A*@!)r@z1xta-`i1fRmp0v8UOsqz`$=pLp01z`Kn0pE#a9@S`I*ojgQztF{xi zpTOxTz&~Yf-|3MIYR=~X=i_r89+PaiSb1POvd0O|v|GiwW6>l12u}05BoTV9kW68P zCa;bHls!)40hn3cMmg{@`GEt zZXeh>CjLrDe|lMoZ2`tsAt^Kb9-Kc45D%7{6vyS8M@1=(ML`+qX1C*ZnxslvX%QWv z+>=zNNcoclaR}+(b2$BEDj&9N0z0IY7MG*a1Z?X0s{}01s2!ABX)))GDKRF`)nGE* zPGf2qLM82f7)-X*>&vBGp6PD1ol`JuH2cBTcv?}GtbJ)bSj&Tr23sRD{cWp9>JT>< z9Ryl2tz&RBudW&VRCNXlyVW_`f!#y|)(PG;G?y>+kFYj)Q$`}e`#ggcflh**9O*Ok z95z^%zyqukylDff3R!bI82j}K>jZCF9+SA4d>oc-ILF1+;7yMOt_$$oI(S622E#U& zjtjwLxv6}bNeR&`c%86L@H)21D$w{RSSNVX6jfO+eu8y^*AYmSvV9*f+pw0Uu)*sH z{`lirDNO@MSSNU$`GeEg7+~#w6R#8230?=+DqA}m|45>VqMbruQkL#Ww2W~FR8B9r zl*)cWqWe`i?J4OvGE%mBSx7WWQZN~-YCX1`&^e6%RQQV9-<-@gLPI^=*JR9TZO>Qe zSz6zN&@knM6h2J$zrpL0OLP-X`($Njwamuh&yPYwM@P1(*&!(iE@1Q&w+$2G_T021 z7v*2pcs(FI--0^k=1(Pxf4WLGQNB|ojuU*bT&slna9z<^#0*un1azX26YWvWPa=LLA09IrL@)c9#u!|OxMIjr2MuIa_k=@TCVd;5by-& zfSs;N_a`aQe;1A`Knk#*!?iBvP5-`_b z$_kq9aG+0F)KHrv>u@JuuT7nK5?Zo?hVEjD+8$+Ad1GpO>|3>4EzOTalsQ-$@8peV z1_xYex{**A@0mGf&2n1%BITQJhC5w5E(bE94y)P-ehaK~U!1WS?f<-0;FP?vL*)$j4|)D9Z!~x_{I1NUb3-qp zTFyO&2Rzg-Lu@Tr8Ef_Ia^#H%Sf{)(^Qh)DkC66wJ)&}ESSdZhmai2nJD_rzl%nSh 
za!~;cSSwDs!RtyMrxcF(UEb(0$BN0)*AB18#XGagmjAQHV@o)|TByvP*pWZO|9Nvr zD&y)Al@rg;<&8ZmXLxid^j8v{ks#&&{51Z1!}od(3P#g&XrL^wM_5~8Z?n#+{h$9$ zqBFxvRdG7v^@z$o)T9LZP9)}Ex!>$#iJfUz%l}zV|0L0w33vEE|3acOCbImWehlL}M*V(8e~&VYee{KcWqtJv@1+Y?CwZ;zv|X{o0*vGO*4! zEijLByy|8vtUJ?PVPgoOr;i+xW)B9qu}!9Wz(8Sp|C~M|fj)tCW1EyLbV#ex-u`=l zWt*H)p`Vqn8eR)w5^WAF*RS{ME8j*~uY7>$Qe1g>*323t-|S)b30xJk$TKs>WjfYB zGjo?xL@rR&hHxz>x_OB>6BucIsq#(N^hVE~1@JAvS7X%F%{dRM%Pt{X#Zg8tM#j(F zE138m*IY>+uQ^LVQT7%P`c4&&=X|O+8pJ@VlqzZZok#Q}r{=I0TxB$`8-l;CH=2GG zEMm_uNTt-R5-uvc)S9^>=zlfVzx7sb;t2IEnrV4{GwCWtB)SZyS@;Da_)gs{-CasU zM1!pTXXIr{%m8L3fdz)Yxgn5mfANXKhVY!t}hu!Qs&pC${bx*~iR=FxH;74`;BSgFTvAjWhD6jmKcm z?EqJSOhH<9mC#fup8;LL*KZ-+7G&;J0O!ehqmHB+q)}~zHk3CM!gwgRwK$FBX^AW) zcB-V%u2mCytT0Ovp3;o+WxuELZY-|1Gvr9gHXCD?iApDNk7JXA2j_JOJo|ji9@5Ph1wB1oY1dYeQ2Bb^l&09Lm>Gc|V?pN}? zX>8O+ykO(CMbFo3=-K}(@$kb#?CE(^ODQ&5Elo%Sq{ptEb;USD=Pg|`9*xY#8!9`} zNbkt6w+bR5$!*}wr7ewc>>Aj(t@I3lEhaXE)s6zMx2NMYi1as< zQ^(veHe#&Fi=33KKZSG*k!*n>_k^;+WI|^Sm!8ft&3oZJ9($~M-AA)Cr>lCQXLm2m ze!h9(U^>@7Ioy)$KS}mqXtOd-Du=_@C8Cia82ft%ph(}LF){a(n48iLDl`&x_mV9& zwV^n{DGn9h7eNKys>d3ZBg|)2QzX+3f%-zB3kY??Z19ytNu4VZ?)jB+TjIi;Qrlbd z@|FOb65Axq4RmQ%T4D^QO1R+Ary+e@6qzt>{Ao#f-TaCyfkpVKt0=SeV3)U?3}=sN zBB6X9q|aEd7GsdhMM5ITCk2lQY;OYFA8whU)RVBeJ~{iNt*vw8yUHA4t@|e3UvFgA zslCRvPlr7RzVs^6za9}7^OY>o&Qc{^^LyjBoSIi7+ZWq;}WEY z!>+1{79O=>{Z*C9_rL7}A|-Dc=h9344UdtLzy4n8ulmxxU}fym*zH^0xPK`}N;5=l z$p?a4lXO$A%=z&O0T^}rQnxEm$GenIEEKA62A#DyjEI>eIf=YA)|ERAgvvp1=1i0^Cf@BCR>Vv)Ey zjcM`#=!Xb&V z@r+DhV>HQ;MH89VEUV=~r|fnRKTGjrQqlxgh5OIGKgWYfazr~+N>~N)io(Qw<@L&) z>3gPB>*R44T8)x4c}L!4=TK%d8kVHzb^v($xaK%~?ZgF+WEyiPP!~A+a@k@^Ft|E! 
zCg-DZ;h2LcRjM;APg^5z9+271Ubi+UDLU1T^UdWR525#*G(N5p`F4#~tJ&|5`TJI^ z1awrH~ zv)lC4g3#q75uJJo_EQi}qhQ}2>hzr<XRb>w|MTgzTaCx7w`2gtG?pC>ay88W0O?96dq~wi7ZX#2aOHv$0MsO;ThHk0 zTfcY?vkqX?g~V-*H}oeaXUOJXbK5~789fifWy(MYm#(3~F^v`15wV~`b2>Yn2IRUq z&*P!4dMM=19)~`vdNGO_O0-PbS zI+w!bGqS`pIJck`?b0rNMwXaxwv+j_@;pE`!bXu1r^gypFwm};jgS(p^$N+v3q`if zg;U4@=#g8P2S4}frW`NPcIA0UpVC4km09v;HX9+uXr4Vn1Gnqo!?D2*5I)~N7%qU&6!e`F{;1P8K%i{2J8Oc-<~G>MkxMez5GZGp z)9yiyZH*YpP}W}*DAe<;D`u9=jLq|NZ0~5bv>WVZkP2!x zlGkipA1Cb#Rj!$#h{CjhKHu;*AI{lY#ptnhDD<%f<31bZXkQ2DAg7B`UQ|(V^>A#! z6F1`!!q_tMDZHcMtyiv~q`q9Ka%fQ+jPQ=lEA6Zk*{!wqmNqP1PAC#4J5o*h#c;^X z;qe)a*_p**$L1IgDL>$iIPf@x<*-wwoa@k4haE@COAIb$!Bi~$BbkaAI*3}|W-tX5 zvvPH4aIFQitymOMi>LuusTiHr9Cm36NcH#(#l8&fkw}2XkY_y}n!0|s|5ek@gCjSl z?ZU2=Bt^tD{aoJ9vX8#K#(|%r^7DLQdnjtTYWq#meA=H&uqEaprgA<$|NWT8 zAcaqA3dT$0CE|qXAoY=+V^d7e?RO%(XZ6fjeS~=1bL&*j$%(ZLr+cnmji!3lNry6W zEYx@s5GcdN-W8;k>e$r04^!p>a|i(W=zP?c{bCQ7EtMgbklUbj`jR-wzy`9^>-EII zo}7BZDfK#=-q4g8#1xaZQw?033LRDZ@lZXG`i7q3B_1Kd)=33bZQ1fs()Np$x6Kw; zd8dI!wxvaM-dhU2c14vyT!H&|{B6^E`G0$g7=aF{3LCw~r^sZO_5I6o1OzovI zzgg0ThPau)ryMwZoZ8B4eHVYJHS4$`Q74;?a-N}hSStq-A~H=1Oy{=3Gtr-hSA7!e zdxhGR%#7)e(bslxjPrBhxI^Z<{2!W+wCsPrC){e~$fwij zYd%gtANRy@m^tGkSt*~*IdKqmQS<82+&iqwPN;OghX+hf&$z+FmA+Q~PnSB)8RvT@ zblcanW!o`Vg3&Zyd(&qv+ZB+!t9==zY6#5r;?J@u4+;R9rG+IFWB40apDgPzhsh<5 zZ=l4CjPOS&Z;pgoB1p8!IVlv{eg$m6)DX4j<-Etoyx8|>`Y}ojU+~{9Q=}}n#+(>v zc5PruJN{XUjN}@nxX$9= z@u^B_W9Q`8XyPNF5G^h|s)%~cmCoOF7LPKS+@YC%mWCcrQ)2Ou>m)c181}jM>?EnF z&Z)i5%{IeS^l^?Rc{dn2i+_*}3t{j0uhl$}XNZ4t%DR@cDRpu*O1v$w98N9g@~6*< zx9mG5kKjz$ZHiN7E?%ymadLn21R1ptJ;gNV+bpc%X;U*01sjjFAbEgyxz}8hJ)@AU z{9-v$jmY;pGb!2*f*Q*u;?0=$advQ+yvg|7sCB9c&N|Nfa*xV)#dL&fJ#!ZtgCBM0 zOIL4A-LWp)8$?3qOLXZu(Y0u-{4pAq7bDAlq7H`$-F*sH(ltKVM}c$Lv&l`?ZQdFN zW)bcOrZQWP{t_2d8^GJ&3onD}e~^FE3KNRcQ}=v{#QGxF`e0CzVu~lmJu}Lq!pu|~ zdCF4ab{$bMXo_FKar70JV)U1s*nn}LqX&?K#z4BXSz-1JuWuB}_lTp{w~xQF%~}~( z+L~(?Ll3B=I;pCt2xV08P*A3c1X9=J*76HK^YLH30V3fi4q&c`1e+#Sn?g^1DI0 
z-NU=`hy!nq#zVt&zgX+xUFeuBkTs&jh4XDEPrQ_s9CBb?u%n1tp~z%nr@j2O&3w!1 zG0$gZC6Br}(X@&ahz0ek7s?DB;O;S7o!bBKQdV+PcyTdHB+8VCBl5M3=25j5BQ^fA zk_RDXen?g&I6H|XRpJZ2K@ro1pLmGq&Kw1RQ?V4HSo(E&0jPALR0$9Xvsa_0ZR!)P z1Aj!gWJLIDPw|QqYJ7NoEA$N1mRAs1YB}B_gSD|P?!?MYCSj6z+I#ov~fjiSb9&4jB*UIz^Z(wV}8hA`&s-ps@ z8q1-TNbPK9hBDxrT>7Qb@PDU(+|aB@ss7z5)g3uw19(M0gG*CHTMDuc#@~?;oMi(= zWuuDiAP2^&Fr=l;+IFal*V%0T9Xg83(-44W?FMqY#y7d>ov|4~zR(CbS~Jg&vv!v0 zANGLneU)9=3c@;Gs;u+1;w`Z{UK17)0 zZ)(@Co7xtcQ4VXvB^!3yCpzCuN|dv9YSvhDi8Z8u5_$f~Tid%_<Vy1n`oqCiSinsJ|J|O;UY^s#Ar;eilO`o%#2hCTN z@aS}LPBj`EE~I^oN*SVdB7fCi5thDxRD=iT^ViU`{z%w}jrmO`ozJssU+U2~%oob8 z_e5w<+WLVshgKl=25QDb^42-srjRFa(U*uiA3FPQvBc%M#X^zv^iF z%jeXN!dZWCz;o4>dnH8!ge8Bh;hZU=-TdaeN^|kvhg-=R!EC~dxjkua5Me6~Mv@l3 z*tGtV{-5>!#V@TB(4MQdTwVo-#dRKg$5UDRI<6HqY$Y!17d*EMuW)yb6&qTn7UD=dd&oNo?Wu5y*@_=yQjrX{Gn+d}pU$x@;TA2rpk%0aTR<^>J zOIw9d*=EvPX#9Q9Zjli05*nHWmz+M@Zpq7#P`jOkh^aIlQBWo z7U;x`kTFstiZnwv7!KbVc&?>*q8W3mpbqU8oITJO1SrolXYbBTdVVKIrMm1l`#pNa z!Wgm^1xk*R5Z6R~`*H+qL5PoMQ}RedDe%)s+q`7UE?ROljxKyNujQ z&Jwnz+T??aY^znMDGbq&=B6(MhosTrD3ItOA}&rHWyhch@?49#b}H-a|l=LoFv=w)pBkpv)WoIo#;j2_0%mti;l zX|jBa8D|V-9Y2|&#%}`Zq0S%g{GsuF{JqQ}ZCD!&u#Y0|iD$$siJSv9A1|{>oIX&2 zqef*Sr1i8;fssKBW~r_$E>$btmc5<0hP6ymn%I48o&!9P_E z14u>?hplO)caqaHb{?YaK5LMpPeUVFbVIas9!K9P0=eO2#S$u) zNvP4UL(AyP{NmIDW&w$i!r&tYs(hk+CwsO+ur zM9X}LGINKt=@~@d955Ebeu}Wl4ud(<8HGm&dq;##a07xvOBrFd3#Mov zJ*UpxVBz4_5Eq$pOc7?utXcEne-(5zbXE>>#)@!P&qDE z?#{8NMxVyHwTdiNZ!3ON1ciFkpFt26;0s%k9m0}7#UbR#MB_sp?vP+^UMkQDU53&O zCnX3eMLLClLO%84kvw*l!?1gfWe-=-M+P%qyM5D%d)Fil+^%5Wy%yqLHg8&Hy8?XG zN!z`4eUmpbOqbT)1QyS}J3`M_Ase_9yJr%~1$+u{5+{C%&?s+0Lu@LY#rSf`Y43J9 zNAI0u3T22B8}Z`>>&xjT;n>rPd#TgjYeGzCKUhc|I@70aM4a7Q0AS}w-&mjIkGJh9 zCt-zf%a?`Pw{dx?)86|iL`Y9OHtaj$A%~Sdz6__O>*bQu-Z-+1<01K^iI@BmI(kun z*mjg7MO^gWEPnwZcC(U0dvC0A<@&My5NX{+6Khk|Jcv>Q&1#!xYb3q4yHc4E^P=i9 zfjDI^W{1IiM{isMhDeUL(mYi7i?KUoQAaKCM9QpgijhXaTWSgwx)Ooc=`EP1YRF{5 z3y1A5!mNW^Z$FlLEd&megD`J(8rHxh*`G24DV|1rR$w##v*Z`c{4J;9@jMM^>RBBh 
zMADtuh%BWs;UXd;5!o@-p^@PX`&k}?=mEw(r{U%0v9I~>)j5!3Tbb4K*0#7g4r**9 zkpwm{7Yl0e(B_b>ZCV;18wRW~j?qbbA7A1=!3S&rGPb#GGlxg`nd-QEPQ%XbCE60A z?A_K4k34Q&@E+z22gT2*qNCl3*L3X(&XAE_+O>+KFZS?ct_FMJ6G={ATv*;JxhKvU z=fYx7Y*)pefX^ysu_xel;1y$vlscv$M+1B!<+n@r=RX~eWXW~oq@@dc>M_sBtx*?M zxKdhs2LHF@{4!$fp|eHEyB`gkGf$Zf9bG2Td6DSGv9V20gVNoxL}r!6V>QRZPm)2A zzwfA5R7)$R>MDC)rD-tp`rc1z=yf_gm|Cb_h<8w`=8&E}#g6FUID;0yD6yvl$6WlW8M+I+Y^cS%{pk95CZ)Fmpl3C4zRMMYy< zNE)Y!>QBJs#B`?$Q>{2)giVAn4RB#=!+w+LnFU+9p*(;QB-K!xD=-0;rH~0Q(OCtA zSFBp0TK7C|kVoG(kr6|xX)%4=Z_E+io5KQ`Vu~MldYyRZZ?90(z2@jj1e^@5xyefF zmUBWcncVt-b4hI7rP4SD;$fSL zSCNMa5o^bXz94;Q(-}g7wZXP)J{D)=Rxfa`z+{eo+b!)F1enm@GO#(8j{1^uThDx3o(?P!=T56c&gbl< z%U08`-E9m8xp>oLFD*n(4Csx#ZMJ#&e>?N>E9s~&s%(u%^{!%QW0_41$7YfQhwX3LjWSCnbr*u3epTFNoodVBy&h`RkYO z@>dMu^75H@v~go;xOs>L#X~d7<}>kVqc?h_>TPV2*i*JhMi|pr!2xdp>HRaNi(o2O z@6%*k7L%d08OMUJgimzJvbd4Padf&Qm&N2n&K{Vf-$u`|NYR!KY$h#9i&SMX3HbBB zJfvd!GNfR!1LcUY7C{=;9=Sn`nS`uwF&^&Bp2MfVuQ!K#z@*}+^DjTiO3yJKF#E%X zdJ%t&*`VrAo~)$7nD+Y!QCa9~C}z-^CXT8x`BMKn^}Y|Yz|_Q5itnq2#>|{p%7o9D zSTO3CkV`|=OYF+jg|_#-f?>Rift}WdA*dXBl943aBj4n)*H)H*KoM;(`baN=jpJ4- zt3VO(_1-zIt_ov?0163_gHU;aUS|O~a79JxNH0cT;rR;;2r5l$#P`2c@@2YBCWt8V zwg3Rw%s5{R0=r7gmd%mv9*G4`>V(WKRcU0``aV-ITxqAXK8H{{mhn2QDC7c0>Dfpw zPEg(*y&2KmL zLXN7Q9ScH@U!NnglvI`jlT8g-nUAUMjgw^xmAZGFrKDU$9tR{SD6fx zw199{#y_(jan$d*P~YR^7`;GlkA4asNJu2)SSg=VwUvS>^J0zru$1av8smuXsWySa znxs6~7;A;)GY;5B22`eTy#UYOzA(E=e&^oTw8(@34R*NafHj;r@F4Zq&<$e=gJ7U) z6C*lnsD=v!qV9zeWY23=HyNkb!?o^Q2=Vy-w_7PR&>r*%FJoTw+v zNn0>uCqC|<@fxTNIUSti8gNr)d5EyDF+Dbq@&!gBjb;l^{#QGZoLiRg;C|BJXk$4? 
zvW*pB$bgs|)pgOunRT+&u|hQhM)$BjRK2=Yk~Q;J<2hpD8{I{Mov`qT1z?+;Fq3U3 z8dj9oyFp8WHFk{4#&>tSNwi1QYkYfIH%@i5JC*WyZW2Sr%!G9?&#QQGJ)84&Xm)m9 zHjRs-!p?`xy;kEv8IcWE9Beo7nKb1ddTvZZ`PjsV(?~Mt4Q#ZZI9Gg);`)q;J4%z}QH8$NeixU*eU0eJ>U2k|p+h9x9I5)0Ti-qAx zi{z9o-Y4I9tHQL0N!a6^*0Ztk7J42GAkH!g8i-VZ(@c2^QYRt3g`VZ4bcxveeP;>a z?L!ebWL&?9q%%iO`O>{uAAFF4E_{RFnOFeanu(msllH7hRFqW+iH=<#G;0nnwEjH8 z3LB<3>}+NXdJ8>gKs8rPm3oh@j+4|&?2f6Gt>EuiDR0G}qjC9^EHE-&q9!DDn*&YH zZ84I;u6ype<1eVQ%pGF-JDks7JGuR<{O~3vD=&&e&S|Ce%L4%Vn-VaNj%`1QF~++y zyqNaN;KG;~7GCs1facKGnpdqf?p0x|i)af;Q<`Oe4*OCf>Mit4z`;@o%Hx2+9!6gk zK{+8wCNvzm9rowA*S)MqlBjQm!@)*o>gafd0+^m<*~sL#}Zp8JHQ{foapYXsg#nLMg_YxRoQ@)yIDthOD0z+r z?<`uNd=zNuSlO-5+6+QNn7haT}52Q39zQk|X z6YV{M@z(%qOiQzdD?mNf7Uyq$3FYiZ4o5g7#S$dqL zJF{q1!u0Lpe6}9Dyp)I=J#QE1v-Q~LHNvXK>*BoNhr>;nf9|p#X9_3go=EBI;(WFq zr&5;(LRL!k`13rD)lDHCn9*(){R5-v^*EE5GVG!{vGFD&>1p$pWj)UKb&)<%oz*i< zWG~KTJz}<5k3wHD@UaG+{TG-QS}3BIzYD$40+5b}1Wqof#+m9#sAD}|mh~u7sn;M^ zi7~*jIFs4PdK`O5a9$W%@Em#`gj6)q40697I}_pg6nfq+&Vi5F=jf(23?czF{+T!j zJK7g0g*%b5G|N zRo+SCcV4;xslL7v8x7?%>8^2nOsl{f{c%k3br28+sW2geas+ylYcBo|IZsV5=?Z`y zKH+@$9cZanvt4{BDwF`s(xdh{7t} z@b4!JDP0vAN6bK<>d%TpAWbif8#x*iyFAU2vasy8QV`}JetN+ro^t*iRX(!V& z5lqiru;gn03`QLYf7mTKbiB!=t0$J_Jeog}SqA zh<^zvWI+zqeCukN_94GZ5nZLp4VUOW#OWd^nu}^U@^KG}_Kc6Q9Hh8J3!mb3264-6 z0y0COt2BhfV16KSp6B$}PVW*O!sm<`=3|duH?a`=@CU=Ol&2fhTm0kF8>`ZjfQqsJ zynnY5He*mpdj zsurEdEoQBI&KWOKJ~7dB0!2X%v_cs(l|+T6lz7C=PS?w0C8s7LB1Xhb9|L!&#p`Oa zO0cSPwun(1Q>ISDPte^}w4B)swy0ajT;^sL!mu~Q(Sg;t27(TgwRmIDF~$)n&uo+v z89hGSeDnRI7B7uE&+cqpP9pEp%c}z)XZ*O0@ha5NillTZes<+RT|H1jjUF_Lt3~zZ zd%Qm0t6CpSDBG8HhG5!pDrs<$a^%w6J11%yYyPbqsvXN~*XNxS3nZa|suyp}a8Bo{dRib{3)#5l8WWmrfgf4u3 zeJ23M~`-R7o zTykSR6?fP*#RW#w*)ZL-SzZ*W#@(j{!N&S^i8iu&obMZx4@!GiUT&{QmVmWkS6q}3 z-v%_F((pWAZN+n08vPFUQOuOilp=PL;d{2#&9lP62H0NPPqz6C1AR~3qD^DdXkJ9} zPSBSe2uZGk)xU8>9dQyip9PBslE;$#tkF^yL)LCs#_B`W6ULa)%@o?VDnUsJ=mBPRB|ZVnnnWmye;c=u=9%Lwj81XXG?r zhj`HrZdPv+QqXG0Y!5zJX*_`Kf*&t@$S72pWh|%`JLdp#EsE4t$>HUkR-4=)KO>m> 
zk;JN`@vv}}!gK_fCsq9jFpn=)(YSYO%>89VQHHTXA%Q_whvB~(ymU$6_)0_FI7JA* zynWrG%Zn%>wl{c5@#|%P7Gg2%e}U5Vb&}xT3<`|hg0f6YOXcl)Qjv$6N5;U|>h=7y z+WjbTTFE%s#{-cp>AA$6K$kur0qUxIyv8*)qz$4T0cs(Wo4Lk_rlIo#i+%*C@$3dz zCv-mm)GDV?sNunArv`Wi(9ES@KG9K>Ho7*`KGf8Pi`WJj5by4K? z`R6r9iV7ObDja^zk1Mg|9mP;I9?Np9#2z5mt2&DhWyoOg^xGO6jzzv0En-ApjTRnj zmK+ap)4g4HnDUXFy7DDIZjJvndzr8n=F8RN>AKr4wf8xLgPtGP-4>K8>DAPI>s}La*q5gxD&Se!R z1Uik8xiCFFo5l9_Pwbn|4;#;%#c6?vvaXMkhl!}HFN3wW@8fII9R`el?t~pxPl2Iv zX7TyPGxnPH1E7}}J^ynjtX$*xMoJ#x`45z_ND0|2rp+UY8K>GDs)xgeVJ_Osxowm% zPvniCIA$x)gtdEa2NVK28mQdlRos*W#adsoIWKAdMgh<>MQyHe%RcJcAjsFGF>qdT zfJHte4Z3;5v$oLc42y%$xv{}35MOp*UCTJ_6 zgEuir8qtaHLE^UO;e;A=$rnK!8nYsDDPr=APji-|wKyM7B-%_ebIW1#Rg&s7mmD<5 z(#Pf#jA()Ks%|b8W8a)gOZk`qvw-LfFglxf9#qnTm)jdZ^mh)vsXgR?*;*W#=rlgb zYBSEv2FWy7+EWSy$8Fjn_Ra3ukzh2g8#DsUb5k00(oeMAn#(k)RZ1_`I|HIVOJY$O z&FHbnH6FdTl0t;6H_PLQTA1Y^ENYn~+t~;+BfQw5F55{`0e#r26NNsM6w>gmVIN7q>=EU9M;-t-|7mN*ye)d>my;K`BH6YS_LbQFHXim|HuTj9O+2cMvEk+%DpL zjxTLIR9++cx6Ys;Z&ezLs-iYi!eIr3Hw!b;tJEG+st@VI@m}Nf#p_$vt|VF#VTU#~ zD#CWhmocp+0A1iLI7IJ-L%CK8OpQch0jsFjb1SVR1kke_*5BG@X!Xq12JYRzMM&6 zqi_{>gSaJdKQS*j?3AqJW0YaNusoFHI?ep>@ z2p?rAk7ufvCBTb4qBGU{iQ^R6)3&m?UMT*)j} z+eox~6pVmKKU}x)>G0b>yv*a`A0vh)7<8I|liH&XbF#x~FkiHeIof!t=CDzHPW@Er zYR8yQNoe1F?H}(HJXh{04d>`9IqPej>6x{^9-qoQ${E-M*S@bBW{-Pcu|7hcl~9A4Cb1fXKZcTceFzCDztu3 zKaD1#3I4*LxcBOyS1J5gz7C}drH!&mt4TEM3psb%324S_<_pbkgq*<|)(K^pXbO$0 zUN-`Zwy!Qi$*Xf!G-?nms!9=_9r1jm((-_7ozP54i>49u|F`YJ`Kv-KJw-r5rP6M@6F79 z;y;iIiG(QaKPz#>zlH6d7!^!GeSwBKUN_P%A7g%0g&ygorLeeGd z*^4UcN5ZNHg0%pyJu($+7_pY^iYAo0x$VT+sv<;0ja>i(b!5K)^f1rgYYF8e;4%VR4$IE$K+=h&zhYMKm86wTwz&>1c02nL!fll3u$ z;k7?_kqsdQhyF_YnG@h$qGqyHtz7BE!)_cw_)6t3-#`a^!2)>%H(*Tn$gpU#{H1Z8 zH^UV>eb4E+uqw-QsVRvf-E$K7$`q`>H0E6U7PXPQsl&43_(I-quE)#4xnlSOq|Zvt zU4=yWzm*BvTSoyg#Zr^Nw*crfG$3olqvo!&n3~tjDu(mI)iGz*S3!_DY>^ft8&S}6 zj>vf9OD`P7O!142$5*@~zTUq&sy*drlu>gzvavZxhDF~5=)|~`Z*Mfs-E$5BE;FV% zCndGvyCV!+&}oE~`b|7-^r*#a5Z8njU>v;vb7=XJA{pICm+HJLBT*$UkP_lGhk$V# 
z^yNw`8FQ+B5fHmr+N;?eSn#x1e#1e^Hibw{QEKrLgyKPdj>a0tFvfW}=dfh7cZQ}} z`9y09OA_k%WYoay+-WScrDHpJB&ky49P;r=isD8h$XGB&7kpOtEnwgTAD@DP-aPxl znOkK7dEY*HXz^$HgqQC5TVpZgjs?+2#dnwbgO^{FAZ1qZdwS7Vp`TRrMS{-d3aC&- zR@74+J&CV5GBnIp=!b^T>vd(5H5nv|nlm>1dhpa`&7c<;P0-t(Y18U$)$?>Pu0lUQ z^~^LIkff~|1FS+nPkN?2TYH|mRWdYe>|iFbG|bBJq-Xk`?m2~ip7iVhy!L$R=^llC z_C2RIbtsr%TRpRvcF!pqxb^~t@53n^`9vlR{^juM#tBt};awv_-ENX*ya|yO9Vd)% z17il9*~}p?9HL!nUq6eUi(O<6>kLStf>5&8P^AR^%2Z9a`WbpQ7=m&QhVZ=y!CNVL z8s^aR3c{8~BIXXPG;5;W$Nqwp>vX?Aa^t_M`)f-M0fN*FSSgT>~y^Se8 z|K&6TxdV^gBfhpa%)O2=tZl!Dx*H~U{?g;@q&3n}{mxl4O-~x8lj+;6fMF55-S@G_ zew+M6jk=t@M#F3X>mw86A^uKIj!HECv>?A}h^z_o3G?~p zr(+AAebPRy%{z`mLM)BPQpyYSP-TgrpN^=)$0bLKI#48XCOzAW;&5hfR-TvQ?!B$qi64Z!j^r5$iYmO_apVUT$^Jh++u@70z`XoIi(>}jO3R=c| z-#pZbB+sf68iq)-b^Oaa-sd}J1VsgGt}m>2bkgqi9W$HtXENBXFBF5qY7~97Nnps7 z^1y;vz^F1ni0;)wpgv}DNn{rHwp5{0>pIM|6T+XPTTwTOAM{8HSEd2`TTBDOvThvT zq5&Bz_A>%8Tzn8OEDWe(^-b@R|F)cl6kA2hmJ@}Z7$1g~5tosI)Ye!rRcBsBrl$Ij z)xiL>Cn!l5&*e^|8vn)$bR{;j#sed=UYuo7qfMj|J*5;%l=Ag-`~XAkC9;n+Ne&LF zpjWq(N5zA!{~_E(hLs^MB>rNsqB140@}Ep98QGh7zz*b&$_Y*87?Mn#k&Em0Qihu$ zLf-V=bnVC*^J}hXlgvTgUP^m%gIGOk-ebE$T-1Hy?G#ZVPUl|>)8k*MMJeKSAXc?r zjb&NYJ<0W*D1Z-Y6yfGRuXp(ysU0;=qxhPs@R01S2I+g^ADLnk(pQC#NEPpKqB8?o zJ;OmO$N$k>IvPo+IQ32uL5Gu_hLZ33;jrzZIQa&bB-+Z4$6jTDg_fpPc0P>b#Jmu zWMxeu(%u7WaWn}SH3h+XmJf{6wYaKONB4|)E#_MF8F>_-X_lHasWIp`b6X3(?>Vy6 z6lPKkiI<{(Sy}WrJLz-}@^5Oa$4qffPWd-cmhXn@6xy@cvaFZsxq7=NaEUlr*+-YD z{`k1ln+%v8f($3Hr6vz^LQQq*@|cPdz4?I*x^d|FUsIiFntlRx@~Ww}!k$fa8h3sK zUgl$f%T)8(ugI+AXr1#q-JIh7`okoz^V%7wU)iD|V?W7Vkw6aNF|Sgg_5efA6ZyMI zUZ(zA?V)ZQdWN_`3y_4RCYHRe`4^!O5t=rtS}Qz+Px!CCg=-jjeGeCT`5e;cYl`W` zu^cus3BrZQ1S|CwX{lP)V1%BV&VaBbpOpqn?BfkD*=WI+3d`07>WBp@7XNtM<@6Ct zFA8SsC2LKnO(1yth&Ab(V+TDtH`iH;R;@N8;`7Sj6k8H~_c?0v@t`+G5#8QwjENcx z*9UN3Ys;$ouw}PkpWsknqwW*xO|6jH z548Q87#!w`0M<$OE~?(9aX~4&Dml=uM|Zyitn?yr297G0ntq_0D5Z1gVcmeo3p^$Z zpJLW(uEmbx@|mR2S`~gW8lR)euu!NJ<4%lG31fjQ|H6pCs1Dw%jI3jaW4*<56;Pux z^3*HAFH%^H%q^BwM%LtWsQxfMK9!ExXEWhFUCXQ;CR9-XMv9~ui-?+NXScEFQgn%3 
znbjUDBdeYi@+b|h`i{gs^bqCln})HkbdpuZ!1U}l{}euLzFl~ze0|f7it6|<%>;)I zR%c_;*kWRsZ-p!+LaIoSr=miP$&AEYWO%NOtl{Ktrf>iGp3tqJG46D(&n%jik@aks zkJ6|b+2zk4-m-|=yhCy(2W&Z2jF)AD&4M#RRt+peybGOu2bQ4r;EVO=Qy?UV3w?75 z*<$6QNKwJdDyMN!#GPMtT^1iXtwd+gip89h1Y08e!|-BTiEJiXJmi}qvDrG1i2eNK zrr>P8+jV0|2TeW0q95$#Bry&T5e3&@dgoL$di%6YijhPmVbS`%{yXcMy_6yoKNJ|v zs#RHE912MpNi%=`Nc}}jyvA08$Dmc>;CJEnYkQGNw2=P4sfwIkdM%PaB7Kd-%lzdP z9*TfTD&R`jMTZD zxkpCNFftnB1QDKuZQ0(k)g?Hvx1#oIh;Ohb|5P=~7SmXq6_XrFGVMV* zt#-#B&oWE%jc)_t;s!J>!Opp4zP8__Xh;#xrQ-$8Wn4YJcD~8%)DY)Uw~VVjJ0+u( zK$>%XA07ir<&F`+RL0eu=;kDj7`-r~9JoV|n8}VBtrrqREi;xdtE`Sq@K?vC;4(`w zjfU+S_oFZf3D9Un7^uY1mbI0Tow;+TT^*Ys_N%Nh5@Y7h6K6mbR%cFJnm0ty*i zpPL1)Vo_Iqo=w43EbK0OgT}Nbs3I;+QE8+|^hm17!j)L&*o$W3t8*2U?YEI5~Qfh8J>kynB}q*#}8X)Jk22dn8UNle|wfnCBW{1DQ;oOBbPj|kJdVr=2%O`bpuyqK$r`3{v>VZPMAWIiZxA8imKw<^-%@Ex zk97{E6N2cMC+KecoDM38>%L{>oey-wDRK= z_$sm&O#z3sB>j;(8ko7A#tVg4W+2OpHA^uon@B8C6cu-B?(qv4=8>i+nvF1U+Qc07PpQtBjcMSgL)K!P zESM*Fo+QTNa2;XAJz2EI6gW?!v4KGJ;^b8xh6;oRnGA{?)W^|}Cqvk2}S#^QN%R8#+ zt6x6eH);o}7*p$g`*H%v(BT-EwR;+pp1A<4g`-HLf5k4ZyS}oEwLB%J>0OVexOTmW zSftX&N7UEkusXI#hrP@O5+o*+R5p6czOyi*N`=cLMc?8g8|q*+b?TMZ-l53l+j}+% zDp4vcoS@T*DZUvlLLc-9hvC%Ah+oA?qOjChFB>+4Do1}UAtR5HlPtBU0-qn}Izy9_ zi^g@fE7%yVY4+CR9HL_}oVhT*LTm8gEHpH!{Ogi(!{L>3clH#L((2j`S+4 zfpfwsI|Jv#4g+GBO5-S6!t!g1b25C}ZM99@EL9q(PbW)uLxAWM;-c%|YmYY>?zHz^1!Y2-t zB+62|m#C)SQF$=_y;d=b$WNWJiqrt;ZO6o`g>j=ky{6Dnp+m9@jh%A=JXG_U^Sktq z(a6}bMO?@;SRj)}LUx%lr=ymvDu2gwE;Jb^E;P_-T-kz0QRTrVX*D2*^c$QB457>d zD(RiWG3f+Bk>lwe$J|wUc=4GzVo;2ooX~wc`_}+{lg-e28)wFI%I%q^I+y1hkffGQ zCtRjYn0$d1kJ%P&1IrSk3Rtww)mI+RF_o<3fHaFiVjo#=G4HY4F`T@-M5Ua4>$?tk z_yp}t!(18Xz#D;lg>v+q@-5D7FLRAZYY?_N2Nm{_K?QE#0FzP7)5d#D6l`_T;qC#5 zl)&7OM`j(|_YfeFLBbEcD#boD7V;5pQQ4f*IXI1Z|77*`%favE2ATmA6-u*GaygO9 z&%rqD+Yq*9hq|N`>7q3&c_6!86DvZLUskMBq3rt(u*)ZB#EH)cM32 zMUh;d0}XRjG4YBbI3;feLoEv%LGvox0TRgRm%61JM-}tX)8XM6lK2%>jGp4DjNe6> z;K%@NsVqJ>`5C-h#lK6!+u00D2NCrb`HdLU_Ma~sC=%5-Jt2dcifeY37*rTBxf9w3 
zPBWKMa@3Z~nk16Um*u#|=xdOi2<;}URB+#Qkob`YW(Olm7d(D!Vu;>b)<~GYLuf)i z8sB6`n7v2}9IrT=<@#XfxW{q=GsAP(L(#$E!SIth1v8xiAH@oG9MEM7)>F#Z8#He) zqQR8K)c9-59ndB{Qn)!=d10j!T)j$-X@E8=WFw-J`HH2gDWdW{AOUII#&mQ3)HvF+ zl@=)81_9t5dDx2U#-Zn9PoA~?u8;Sj33@f=$Q?sh0$p~(QVQy1sZAq>*X#M~#VLQo z9U>KtWnX9h61`Q7`wl%f+t>)aS>%|_a4XLStd)Io>B9+fmgGV|rj@SWE|f(cj_iB9y z)ukk|PHu#Pxt;Vb1~XvhfPmosnTq7aAoO(S#j6;HaCK~_&&xl`GLv-?HYg+Dl-z}violo=Hms(G8 zZNP?|vvkY`!PnI;7uiH0kR!_&8c!!Z6BFTf>^hkzS-Qb1$mw>^uSkD0QNm012O$vI_p@bJ)6y$|nYt=k zBW}grb67#a3>nQfMIzOe(m&a_mrTa6n&~jO+2jP7HCW3h+ln1-W+)&uhJ>=+1shaU zR$0C=!L5Ka>SSI5Y-RM29~AAHxxaiTVjN67xXl}99)h1%3FP0+#_EfUw^eKML{s2BQ(J;#@(n?Y( z#Znaeh5PARk?46F-~-GtN~ffKB}i*a;%k+H+~GKY+Q#IVj8_7f`5Im~ z^t{3Bg3OxqWl}=Vg95<1rRjmddc$;pcnv+zM}Lft;vUXwH-8k?B%o*4gp=X~kfFB3h*;QY~s%cuBXxsmOkkf@ljyi1*B)K1?PR0>a zvWGNYxY{G#WzjB^Ql`{#TwN9|Z_P=6C3wB{0p%QeeuR0vVJ_O4i2WRTeqfAP0srcS zft8WgX2A$*Qo!|WLaa2oG~*F5RyP;R1J&&~#vt11c4I@&oZz4fXCs=R>n+yXQY-7L z#?Pi$2g|Isp2^Fz8ACP56 z=0|)n^qgfBKi!TDz?CAgQc+MLvLg658!=~$WSAO4gCN6}RXiK&dbfmXJGUXs4P8{5 zybN;VF2&SJomeR94Lu=2{_9*K&KlNJ)2*;fJ&R3rELEzxrU9h2ACa3uP0E)#v8I*i zKVYUP6uh>5sNaO36fo(=(Xs9{3AU{inhw_DnPOdYv(;F<4+X4g9dWVoL9v6n9R)C( zf9D*yoFd2Sb;uBQ41rO);xyMOUbCNcI1I-Ojl}`Dyqrp37P60e369PUNU|KfJ__~2cWVRW97K}JyCUuQ_P093^ur?~N$}(3fFfuH{D(vK7=_dp@IK%_y zP0No^aanYnMC?9d2;yOJcjPb+gT#6Dc+2gJ0orkQu*_;&)lD?!rtByZiAP?GTh?&h zW(STZ9D^7jbdR9$h0s79a&5HD0p4z4asbZq154Sb)?19A#xM*<#vni~hhy3>hIGMg z31Z1}QE}(Y&DZ$qlx);PAoXIOSM~qYQsv#^-%(wgaTedCOH`uk)RpK6acS+=Yk4Z? 
zoBtuI?Wetea_HN9NG0*kA3gXtzm2i|&z-x|wz?lTG0XjL_u2=;k@I*nPj4TFg-)48 z)q8Agns<%ipdIXQS<(BOkhW`V41Io%O8Rg!bF&4*09%frx!X3*_a~qBneWZ&NrUaB zw#SMr-CpHof50=Vvl$+SB6X9~c!e>W(*C!5ors2|>v(gIXBD13J2c2;KHm4V$1~f2 z-iGEYrHKx&-Q&5EL3GS@Y91dN@A1qzfJ8nRM7&8ijqAtbpE-k-?(*^W{z^L{5P`SX z=~oVTW-36>Ll-Z-R~oN)j(ocR?OrEKyhY~N-`wMwP60j70Uqf2HEe75rqJ^^6y7S` z|C=`I_PlzZRfxx%7a z=TwEg_dV_LjI-kBa|RFeoHSnhheOXp8pw9BxmiCR|CDhOHY(la&v&A zNq2`QE)p%i81~;y>$PdHyom<5Q{V|RNYCALzE;!uCdXr3bcnOJc*l*V%EvC>=9{R8 za;*}k1dIU(%q|Zt19Z3@kGI;jlA~ox=4$oJ&#kn(WitZfL4Mv2)a}ltcuP3c`Z4|*X!YQ7ejDoKq9cv$_GTtz z``d8m26Plg>in^5j2_xACElEeo-+U2o=tI^Q*JR+?QdpEw~ftO)myO!YeU`L%q(pi zn}{j)l=QgV%v+v(`mlOS_|LDOE+`{>XWJBBi_|t?ghzDd4&hm8yuF!O+Wz)*>j=bU z9UF&QwvFMoy<~1B1nh1eW-!aYb%!ID?5=UPuWipTFt=moq{Gd_3??~UyKp6TY16oV zJpO5~ky|Q!{rYJ;I$ia)Y4CiYjjZT-9;6`lncb)Hfi_|v>ncA~Nr%P{zjf3p5_lUQ z8y{vcOX)TY6e3oRjSn-J)zLxXs;VCwA7(HFe`#KqtV-(u^Zm)EbK~6v#{T}9+i_)6 zS(^72y89h$wEUdxf$JxYx3iSru+b(aVO(odH{2{l_r?4UHd@$D+KTm)n}y3w<6xur zZr(AQPZ}3`HjQCm9z9}UTR&;MUBUTnQ0+n#zy04olb%zA?$=L8z;N5&H?2Bf8`%ru zt`f71J*DE>bJH`~7~EQZ1!?uGgRE|4a;S!&-!z1<k?_ zJ%7ITH+0+IfCtDqDNWD+mY*kW6g~eYKR3*o6FYgoR+P{D7Sb(HB0u-C8leUAa^YC3 zqx{^gph63(hkwb>6AV+7PgdLLpJjIW)^D0xh*-taw5HX!Q-y=&T0V8|#0nZ4(TC1Y z9YW_14U5L|yIla{wN;G#IG@SCS)X0;?p8E3ZuYY62IE@aX<@t2 z%&{f0p^>0T3E``I`FZYxt=&UQl1OUxRR28HhOIj3m!iF=@^i10aW0+gKaE3K+ux>q zDi(&&zrC4JY=4{f_Po3eiE4W@7~cLi#cf-=jqPfCGg#jKHfwLaQWEgCzge@Ci#{0x z=Fo+lZdGV)>1i~1_&W~>k{bQ~>A+0E^V_yw>;F5j!Xw)Cjc4BRX6A7-z#Poysjd9Z zcQ?aGwv8>=xoIXp)&AyT21|Z!RuC7ledEIn)`@u7UB&7g8y{w{Sv_#Xs#ZPO~u*$uS z$ELEn++Vam?;6U3xq0mRr|}%`vpPX6uP%$)@=GOYoWFOgq}^R%WMkd@QXg{s>hzj7 z308B#pO1N~epN!x+5g-$lW#n7-L^+=plkn7D}nOZjuZXkGy6al2x4{R9E)A=m-T2jR^x}rAXQ%&AKKJ;^$+oX?3d^^@EpL>oi3S{Dx`B)% z{p~uxO)W$`v@*cipnQ~asXh^Fz|=wW(y+`MUO#U2g{b|;fcOxP8BBfVe4;_=TYlyI zx&FG&Lrsd5} z)=JslszOgo!HAtFOUF9c7oF_1l9T)g}aRy)_zdxx1TDw|96zqG)~QYjZQivi*&} zO-*1N*7i3s#a&}l{N8(a!q)aTgP(2Bk)K-#6fJYOdDvkiKhNWT5>&4}<@=M*JRl@= z_t($V^CyMx`hkr?S>X!%#)la!`ME{niih95_ArAbKR3){RZSQJ(v#1$0m-ZV`=^VF3Yplp4eAcG 
zQSx)sMjZC`13b`1n8&`&_>D6O&gV3W`~UfZN%U* z*;g_EtuUwO!wi=EJh5@4=V$WsbWll5?ylz}(<%A64>bvH+BZI|!IGbQ2CL&`fO+uA zXFB73O)_}WX1UaUqEa9%m+}RFhPo1~@=NBm`qhGKd`h#$0G%4HZuPH?{5*H7cEzS@ z+BV$D&l3AIo)fBLDU9B$|DF6y1Lj_5%@PgSU{u4M{7k#lolJXoLiTGwR88Cdck=V} z?MdL8iUYfbJNX&5l))O?@$l;{d=wx%`5Eev)#@scMCdE(>ehm~<=3t0`K$OdC!(#y z%sXUh2uM{Y8w^)-I9LJM$w|Pl(~9tSeMs6zG+}1!JEGhu=BzTL8zYp zJ-{vr%D-OY?Q8vBR$4-c@@t-s6NA*B(@uUqZRF=^w@NU&&uJ$=pLX&yrDjuIl11<2 zXMu%VKuUh@RP|Mkt@^J1ck=UTk)Jy>O5|EMJ?-S@(@uV#T(oLW2u#?%Iv83ok|gZ& zP?Ic)n_FW2HexH(jbAOeCXZTwpl;i6BR^NN9|x!8Ch~#0ZNr`Xe7a5{FqsW@@-x(J z9t-U9zqbcYg@}&M2SMdE%eT7W*EpvP#6?+7A;9}t{a1-G-QM|cbb2Y#pm|#SmfXRi z)}epi^Wi^Tt4liNepUUW&!_t5;i!3k5c;0!pWDKF4-u`ClAaGcZ2Z$IL3>Pr$E~dV z(dK05VN=|m)oSh@d(M<@8=JNERjXt6{^mhOD*1UZ#!)eF=_zk{@@W^)RG0ew({-F{ ze_2}_kpJ(%D*1VwyDO3B{$^%rGr*FchY)xVYu?`s;CHu(xj_3bd}^PVm~_JWf){&vOP#hS>DahHq45Yd$76a*8f>@mtDhUpe-X+I^^K)R0rE-GU8Xb z_TSOfa-wOOMY-O}T3LT(^;CKw`{369xn7&MWOMB4t~a#Dg5*2_A`Pm)`c_D*Z-pbo zKL^b7AAkP)_6j*JBdB&vvN120HmhZK!=y zh~2K?I?!IQnT>p;<=8dc4Yci6LQ=J4FxS~0uE)=itw6g)UKwv={_&01$5zK3(|Z)TRZ zV3>%`j3VLdwsF2c`OE{t|MGvX;8u+BfB8SR?`0*G-@*7y|J-RO)LIRUn*kLg+5U#x z&e)M!X@|y#87%p^x3-c+c0C_vuvT>Q=l|vZ+@Y7lyJ6`$AU*lCTa-erPi`JBwKvQC zfB8SRBaob&>1KBC$20%uxu^Uu|L0cVsD*Io`I-FO^-a`dJvPqL*??8@b2Ib*<^SB) z|NH;)f1caK|MGusU4gP2fq8zQ?y3CTv@!gjw*xw??rf!hhQ3Y))%3b;d|2I~;Vw0* zz{RogVReTr+d(Di^Y%R-C#mG;=KRC|c>~5X`MK@g!vSZ3Id6IL=^$mi`^nAYrOpI* zfAoJg;*IQONaW{Mv+Vt!OZ-Vweb*UuJk};`N|^D^f6LEJb(LEESNXY7xA%WO{UAR# zZLI#!5-e$8I-H-myPvz%%9^Cpr|13Wb72JLLKmS{P zp4bTa{w61diwr@z%m z?z89O&j83q(+z6s1JsnHJHuUyLD%JVfKTM-zs+Qh{?F@@jQ?~0nljSX|9RVRFF*Gh zoc_<-hO1Z!?vkJTh`0BDmhf-BIwPUjt4=zThH2*|LFhxQfKSUq}1_wVx#!;_U2&*!`8khA!VE1PVYhg{1SfGPLD}qzG+-P9{)@s z@PGL~cL>$}=>Pnf=LBtZZvXqy|GAQ&?8abFogzlr#4XHL#=~!z>STdRXC518{cTWL zDz4^rNB`%SgZ{ZPs7$Te(f|3SPLtc7t-;P5b{~vm-h4!HU76cN&&m(2`tmdwrLy_@ z2{sP2k<(>okGmiJpXGD*>;x+hhk;LQ6o1~{Jgn}LpL+vJ3F&=Lht(bVxnp=06+ARP zPEzFOxnS={dOpa{$t5XS)DR&CGA7J<%acz>yWjtp|8u8`z90Rcv%lCb^1r+yBT!bY 
z_rb0Ivx-jJhLLHKGseM_u>C6AauROGK11)!}!2WHOtZeS(c?;Lz4z|eLzUT zmFlIiiX68+F28z*o&L`+A@Zf+J}gj*{icng=Pl}Ltt^ibEpgGdJg)xFWl0&6oYm6b z3MU;JpE_mZlM~r!kmFW=Ga>ml73{Hf*7vin~w zVq5=bMwZzir`2j&tG!Ef+*kkSmvN@>qRQhmnno?C6noS2^nXq!#_nqgWh||C9_moF z`|y8W0}P+W56(5X^?&{`ez1Kn@6-SJpYem!rdj^ayCArI{NNuawNrcD-`~SD*N=1A zhKESKUB9M+ef57<0dd!GBX)Pp<2^NjvhCMD{f3A5!D-1x3cE1+xc%y z<=*gr{>6WzebrLDbn^)lj94&dv6v#Ur?-jW|Gd4qm!Hc;qthPWAN`+$wC%$tqLUlk zAN`*Tmkk;){C@^#-5>m)58`Eo1kN@1=>ME4-GYHxboj>ofB8R8j`Y9$pFjR<>}+~u z*Nrp$SMEAM;^~<=GjqI?_^}KOB*@-?vkS7Yli!|gs>r7#ic+`Uw(q_Dq1Lb|iY&hM zR8@4{IAZ+QwEKT(h8wPARkw7Y{=>%){pXLbZQdpqwdI+f5B4|4gY5^CCFA#*f1dl< zW+Vv7-=41haHh#(bXLi#k@scd#6MptuCev#cU82}|H{P!1oXGWKYy|upZMp=7Hup9 zZ2a#N|2)~EO*SGv<@cF?_Q@}PgI%Dun6ez7`R8fyyO~0OjRlZ;-6t$e@pyxIE+sG^JI%!&S`5VW8=g>PqwIKzq3)tEPCRfoA5vMv#;y1 z(6@S=<3(s%{&=u;SIkIf{&~8BZeYdyeCD60E9jzn_$JzovNgjm(8VJRDv5uN6?WpE z?X=cayY1C8IY3DM9{6WF&>A%y*bE3Tl;|rn!`$>_q)6C&)v9b=diCF z)abUG48!%nKU;$jhJ05Y%|^m^yz$R=V6zV6{9SYGuVRzl_-8xNF2&yBE<|o$OQa97 ziazc4?FafO8(y_$5o~A`eCP%Ljt`>Yz`a=r3-Hf&;K7hT+UHx>`OWO^n&FH#boN&c z!h{UI@z2)3t$p5{o_EimbxoRBX{#~c-m?Q*GyH{{Da{hKaNPckR1a33s~zl?T|Zz0 z9h%pzzZVo-JJ#Vf-rnrz&E_=)FHGWre`XSFU*D~M_b3G~y7AA}CpN3~^KYg$_BruZ zzu}*Kri?@TuF8K(-$dDe8Ay$PwwHSkjIysK$ng7DS*+ zsPXX6W*d$CgZs~iuf>|u_~(O=?B>XDpw0Xyz4zP73)u`0+n|mIVkB<|_wnq@q>f}E)xF?>9k1vGiJ}?EAX+6-aXMf{u}I$W|>$d{uvk#1uo9y zf92u<0=h{2Gc=1O-yhH9ZJcaTUp83i7dHWpKWG{ozJ#DqEfW7slAlH5pV`qb693GO zev$ZR92tu*tosl2=NAvm%jF;uTpZWlLS|VG3W5*jNq=s;iB?s^BHkqa`K_;3OHWx8 zW5drpT|wVXunsN~{|unANc=Mp?_$fSJ%(mn+5A*5K_rfYpY$Cr>_y_A*#RwQ{@HqD zv%O4*@mKGClWsMq?N9rMt>reBhXZ|<>#}t>iM`fWdkkoYf9+sDTPL&?cIo`H-CO$^ z5B75l!lE$O(pH82z=Qpa*WT>s?zdV7{`p`(3%uz&j&$A$gO zo#LajKdjwtM6_SPGd?2Zq`TUzuvxbHp(fs29y=>2YP$;u-|c?}YM9kbKJ%sX&-U>v z{u$ie>OMs?{J%E;UfIt$Ko^OB#>2Zv{4*hei^M+@61YhGGy3x)@y~=_E)xG78wA%I zZy#|FHjnjowDq$ub=#2j*ES5OH?cH#_TfXnN-@6uVEc!Vz@?cNhYx*W91r%Jhz)|< z9>))-9Zp-2S>m5#HzD!QA%;l&GosTX@z20ZR$Q@l-EOE}#Sg}Q zP~xB8S-U0v`N33RCWdQYzR3Ik?8U>Enf;Y71^DPKcZhx74|>i&Qz*@9fVUrL^XO(j 
z_x+Urjh>@qQ??h0eDK{u!2dk@)9r)JgU;H*AxUtbR)UG9kb+*csgBH_xCy8~+@7 zoy0$*#xD~8jE8rT_-6{lSV2bb#8~iga`6ZwP~x9SaJ@+UGfrD8LhFAvnqfn9iXZf%?U4o4ivCUqpI+OEi1k<` z{+V()i^M;Z0(X)4XQJH~iGPk|LgJspVI=X-Z;TCze7Cw_9k_%=@4<;*{iwioR3Z~FPbKX-W#ldbKz zyU7NRWHqVzdxx;S7S+Akc9C-5t3`L8;QrAe?S)rVND7C**VnV zPj{I%z7`YX;`YXo(cWQnzXVOFMcfTEF{@L~4e!vz&)mboGpoSeb^as&ydAKK%B|!ckuO$F zemkq1(*0a+C-awDUjy-4^=mu0J>1yo29y1}Kl8iDpOLJ?>a4dTyZ^+W9`bKE1Y_SI z_1`k`&z5U&n_m2D8(SQEtKgqKbE5xM9=5+2UV(Bgjej-~a`?z>w&9g)yWtLg$v{$VNcHEuDY1ijw1>HpkG=koRx6uVv2vY)s5}V3&kL>KyYwTY|XpzDW4>+ry`RAMb z8?l?Po5udGTNwVVF>ySzG0wC1K;}mN`7-~8-|Yd_?EJH9mz>M*@P1aGt*xFW8?oS@ zU*@kXY^6K@JYXpY`|m_-BtF-0yL(KRCP!C9v_&M#&5x9gRTG zzY&u1k$--Ye}l;JWy}9;tGSPgcs$H?y}b(OOXr_&@^8Gc4o>->O^QAIP`iG1{+KsL z{`tB7jMFyw=a>06G81FZ|2&%RjzMle&7XbOZPc|f>V44OZk@3|qi+Z~v4LifU!O*B zSh})LtJnIg>1>1jZ2B`c5|{2K{|4#n*O`C5%)fz$cb%bw%lsQaHS5ejU+d4k>(&-X zzIBz0)kpie<=>!c{5tc`m-#oSQ?ojdzF!1C^U;3(X!dj0MORl*_JjTT#e)sPD)G;2 zzq?!6fc;!9b^5Cq8~)m!vuLg}|9qK$=ASR~Z{V0)Xa4zPil+Q<`&!o2=HEa$S!e$FGXKU$v{@R@JtAY3_-D}Z zb>^Qh^KantTW9|HGXDliGS``ZzSN&lFV~rW{+R6N566m#;az9`xkHdK3T?*EFNUYTg$sL?_-7Jrtuz08nSTSVU>*7Ar};O?^tcB9{4)PW zjOWHbcQYsS)dPgkB0z~uT#x+oll&V1SL@C{Ptkaz8ckaDwe!!D{(RJSe^ zW3%`#^KSrLtt0>ZcHookHjR@Ip})HH?vI!GH$J*kU~S?~7klZti~JiHfvYP&-IRQh ze}ihdYw*wJk&Z8&^KSrLt-j3J!>OHSs9` z>K(5Qfb0|Ac1GVpZ4cL<5q{Sr|7=qFW)axo!4{LfVRdgWj~z7jH%7LYdJ}6~eed#b z;Ne|;X&xV!`8T$H9)8W!{2SYuyHfT#|3=JpUA-FHa;)FoMmfQTtHeJOa?)KB?-m#Q zWeF6&KfW#XqpKkk|J*mctHeKtl`HYjI6zm4e`ZI&O8hev+$! 
zeUrXQ{Bz7m690_5VQq@rHb!lJn(XJsKZn^P@z1y$R*8Q`30x)qnUKI$;-B&2trGuC z=q28>uw1roIX;1AStb6N1lOy?KcjuE68}s{;41OYgaob<|4c~WD)Gqi?Jd{|qv>O8he+fvdzn1E{PL{|v;tO8hhB+HvsSjb{uhiGPk2 z_QXGrSc9#$w#BttBt2+omH6k%hF9XBqn{=IIhHnwe-0y1;-6vTSBZbdi?>SrbLy^V>m2+ zFgIrCz;EDaUOWGMlYe7Dy`1@H&(X3gDfZlDirALU{PQ_WhTV8EBhCEtx&EL{1~KK% z{Ii2Q$7a|<5thu%KU?C_=IriN%|9sA0 zXZ=i>+I8liTMDSL&Au^za9hp%vyG5rZ}{G%utAvl=eH^UbITz`NZ>m2&u>@!1|fm# z&!pE~)*mGF5|ZoA-snD{RpOsXTD8vn^JV;CC{#25d|5vw)|8olp8YE=n;#*8>&!pD zP5N`I-yJ*pnSZ{#g0Z8Y`RBK`n~bisap4=raAZjQb1V}Q{~R*Q%s*e|-v9(!Xa2b% zpW}G;4I`my>&!o2=HDPBaGm+*%ld-=D(lQY&rYu<$q^E`&ir%ZamP0M=8N1cz=m-V z?Do5Qkg$D~_-Cruttb9@qJ2%1vXul6!hwH&mcK66!Hs|RX73{HA^*l(7$=Q?KA;`V ziM)T!)xR)_C;s`6f1^z}?Q3X^1OI%;ztP~ZIDclfBLB0CYw?i{b{jmys}x$nnSZt` zwjbE#RYKL)1OI%;zu_tsU)#DaG_phfjY;3$y3SX!yZm)vQR~1zy9=uu348uJcRpFV z6=wb||FcV`9NNc&{Y7^W`Fmj%z&~&KH_ROpU+Qs{K4hWbIuE<$-)K`zKY!;?{Ls%I zFzoffKer^FZ7FcYsDH_K#PFi)z&~&KH{Nzsy64}Bd1K?BxBMG#-7Db@i2NHQ5?|q; zyEnoQIA9ZC%|vpoNB;Q<)(oAsZv68h|HhQRjuLEZ=bsN}Jm;@_Aazdsvq@pY91{6A zwu5*5c|)W9cOFZQuxK(R9ry>I--h%5GM`H$*w-`vJfbPc;$bl!_~+2bTRVod)G;w` z{PU=h+qD}Zjm@rW{IiYlp^XQ=+TQ=JwUc|R#6J^q(wl6?vy~n6jQT$|R&4k_eyn_8 z@q#L~*gs+RH2&E~?z@dk|EHE>DUO_A`L-+cQR?z-^F{Bv#y{6;(SGksvb@$tdD=GM z(^xd~cG`UNWu?As3(djyhM5f(dh)Pvs`tfU6mQU*)64E!d!!F9IBN&n#M}AjFZ1lR z*4am&Z^kI4Px}F1R=lV~0^J$#`RxIbU%j+qckvvy2H3X@ue+^5oY^J%?O{!C9?Q5m z{@R*NemUzFKXUwVN=)H?M#Q&tGM%Q-$cA=fq@~j;J)tShfX4%yW;ErzkFZ$}T06~X z%G-pdG_;#~mq|iX*!v_2P2uiK9_hV5tRLsDbO#@EzAbKk=+MvYYLBn&+mJ|UMpIfO z&Ed;-1(|HqjHX;}4WPA?l$sA(J4vbepyGS*!PdsTUk=akSS1Nf!O@UrH05$@fR854 zXv*c*09rfEXv%4PyGsjNJI!dy_12)za%o0WF1H4NI%!5zF1H2do4>23yxjTZ3qZvNecBb3jvGYz?|amo_xz z<<=m^M*A(T@<#tzS6c%dls$y&ZfgLSl@4gi)2%^w=%gJ@xuRG|ZH;d?1)B0QwKb?! 
z>S)SMYHLK!G@>a_BOY;_r9e|&rndg>IC^Smid~suGPu|pFmFs~%9E`@9IR{&a6+Yy zruYmuG-Tcy{O)_q)X|jTK*wi%==Vn_W+=>;#?*WpKEr`qR4F-XQl#cP;4|~qfD*&0 zp(%G;0~Dkrp(&6^-O9eTpF7WK2EPBAJNudOETJj5K9hu|AigCDO+k%G5}Fc2R6<8(9_b4rnHUsZUmBwjZrtEDdwvft;Ic1k|Yl)(3IBf{ftNZ z8I~+{H092IX1jDmQ=XKDfd))Fn)1qiX0_^Q%1vpQ&w*i zh^7!H;9oyV>j<-HM^j$e&ul#cO?hoUN1NTFY^K@nTsR2TJ!1%zS{l)m=k{}#Pg9^N zukB}6*oLOu+0XdG(h*H@rC?a=LR#Mrmh9(fbJy!`>}M>-8BJ;BA6swsE^Wj)>`fZc zl&AJ{OpJl1ytbdC%^pg2Yd^Cb_nXAqXI;jN{aI_b4PmC-7U6VjKeH{B(3IcoyU%Ef z55u^MW_`KxK)R}QMpLfzX9oX0nquQ2l)y8ZGFe|O5fnr8jHdV$HMZIHXKt%An&MY! zJlG6B+*?UPQ(}W4p(!!RoY54vTh{v20O(Vy|arW2ZSX+KAA`22kMv(pvy?K8A? zlF*b`CL}ZkDxRb)@$0=w7mqLkB{YR9BuPS3a3dxOO@Y=<5}E?7og_2`T02Q-3beKb zetB0#*Vzqvg{DwoD@kZdD6SHk5)LB?O(B#lNoWcbagxxKSO+CE1zI~vXiA7q5}HCP z2J!~QMY$Nu{ua43lZ2)`SSBPiB}69)O(DNrlF$@bvLvA?;UARH6e^@A2~8oSI!S0s zXay3Q0!5r8G-c~L$$nc9K%_ z#R@B>=7Wmwt=NpfXZ8lFnv|L^Xcj3oUo7-eYCdS~B&Ftq)=pAtK4|SErRIayPEu;V z@cv7w`9g7(QuBqwNJ`BYAdr-r4_Z4(srh0Zlv48rpdzK_14>L%YCeKZ`i*iso)J-{ z)O-{cNpotxOZ}NdvT01s_f&rdlTMMEuQhv@WhecaNk%G79{#~&YQ87>^Do?IduqNH z`ZK{(Nh(bq-v2o@-?jc67sd4&?**TXo}t}a*Gcwsbe*C<$D+BX=DRFS-qCI<52i@X z_fmfj{7foM9u!>bgwP#Gt_gsI5z}Zvtz0jXy-iXwEFZE|~wI!)EIobBon40go z{>%atsrg>(&ul%U(&VI?OvluG&-7c> z*Po-!O|P?C>J{1@3YAovJlvF0Y4VSLclSoz+RvS4fkC<9Hz>1`=hS?c{4@FG@(J2) zq+7@?E>@d3hKDzw_|Pl<86_~EP{b?#nT$PI;-8tHv&28cP-TgKX0FQ;|I8LIOZ+p! 
zLYDYvBD=H1KeMCH68}t4PnP&+3gu*pem5@+GdG=CM$TB_-6{4Wr=?#U?EHVGtuZ- z;-3jQ$rAreA>b_W&u9htoSM&8sC}c*`3;JKWr=^@x=ym6yFry(kpG?j9E+yJKa*M{ zOZ+q9UM4l)>(EFjxUMVQ+0R&CS>m7Z{%47Qj(J1kpULrm6mcbO&rIp#Wve~w8;;-3)~ zvcx}w`DKZJW=EeT{+W=#Eb-5=!bQ$2$c9|XqGJT&#}-;{4*heS>m55 z;F2Z&nUKIN@z2oUS>m7Z{%47QMsdv&|BS;ZOZ+n+P?q>-LIShIKgT*K@y`G%S>m6G ze9033jB~s@5N^kFhw-w+KjXB`690_olqLQ-cO@`O{4)UyS>m7Z4`zvfCgdbb{4<3Z zvcx}!Rv_`uUvW_+{W-c$v7fiDll12d4;-3-svcx|_!F3vT#9L*Fe+Ib9692p%EZNVfJv}ww zP5j^w7Nf*Jhs7xI&oMDd{BtN&Ej8aA|BRb5OZ@XYcelhpQ@l7!{4-@%vcx}=UoK1h zGalY7@z0?IO8j#SQHg(!`B~zhVW_giKgR|^;-6!SC-KiAESyvGd4PY&Tc)D;#vIHq zOZ;=}=%v(rgal@Ze~x8B;-3kwA#6QP6DDlq#Dp}&6Lv)h( zXPo1}EO+A>>r3LF@$hDee-6<};-4vGmL>j~fQ2ma&*2}G_-8^+vcx~HNcR%|99n_I zKYztVk?dzgr>!*ktJHj3*Gcv>VztCSzhSsa{BwwV68{VZ*Lka3`x)y?;-B&UXNiA~ zc|+o#Nhg>k{+UpSEb-6KSy|$riMPrU{|s=ICH{FkShAlxIFTj(`KJ>&S>m6=VwCvj zm>4DgITR|1e-1aL#6J^q(viSB{h6wiS>m66u*@a?nIx83;-ACAEAh{v1WNpK3{i=H zj`>;QpJAx7#6QOdLE@idizo5VAuLGzGnikN_~+QsOZ+n-fm!08W0{cnXF@MKwYqHx zp4rbGKg$yT95jo>KgU8Z@y~<=W{H0$Brr?-Ga-Rl;-3i#%o6_`-hYXI4#idCpTl7! 
z@y`JQN&GV*fm!08V;z+E=KxeB{y9V^iGL2~xQ8U~8{B8(8S6{ppTlV@@z3OR$`b!f zEMAuQX95EDogxx z09O+Kyd5mr&n%h}|NM<3L*k#qVwCvjm>8op`Ahye6e@{-4mYL5Kfm|8yHC%z_H*Bs zl6vrleou?s7U-XA%R8WpIHZs#6JV56p4REbSe`6jB^|# z_ij9Q5w%GCGfvwg@z14;PDSFMiNz}t|4hI_k@#o)gGJ(>2{|be|4bp^BJs~?1x4bY zf5kf5yXGB>ovC zut@xK3{i=Hj`>;QpJAwq#6QPeC-KiQ$w>S&!a|YwXE47a@z3n&i^M+@5?CbuIhF~D zerIZ8}qKTBL2Z5@y~>u6p4RcS*;}gIkW`|gL`AhqG>pIDP?qYS3_~+VX!y@s|hK2KACKRGb{4)?jk@#ogt%}4y16&n}f8Gw3>}M8DiGTjaVwCvjuoxx&IVMJle-4F8 z;-7I-7Kwj;aCb}m^C#Of5yXGB>p*+K#6~jAu93DF+WTEGYnOc z_~+OlNc?kb@g)8^gawIz2Jd3_I)&fZx=ym6JO5lH{<&hfO8j$(dlLT)1y|bd zyW^k3`jYr(y#GbwpJU#T_-Arf7m0r+6rxD{GY~?N_-Ep+io`zyTos9b-VT=RXBJI~ zfBwc|l=$bc7$yEWCPs;W4uwkMpTkWl@y`#YT)ze1?)2vmwxtsPOx>j-@y{eNE)xG7 z9$txm4kb|HpJRwh{Bz9D68{WCRV4m7HV6{`9FvU1KZmd&@y}p>MdF`hM=$Zugaj6e ze~x8B;-3kp+pL5Y73KtDBs(^lf2 z$>~%i{+U?3BJs}zEEI`<4*#IUKNE6NB>tJ|8%5%uLo1N@=U;JA6#aSYI>~-UQkMAV zSTrU6xgr}%{BwwV68{_&oWwte^(FDo;r*BR=a@Gn{+XQBMdF_cg(wpL91w!UKND|N zB>p*oD~W&J4wmd^7EOtN{=#CE_~)<~CH^@kMu~q8g-YU|!%Zpi&yRk0x8ZzmKlg2E z?Tqma|4iMbD)G-GF|JbnXFR-B;-66htHeJuM61L##6JT9Rf&HlB(O^SGwWcL_-6o> zD)G;VPF3QcagH~m;C4S)JB(K){u!rjmH20JI#r2(CKj(s{4)UyRpOuV4_1kPCgh|_ z{PWUvq$=^xXa!Z`pFiWGNcMAg2Um%I-nvfFpJUOK_~+VX!z%I5h+>}+~pWnEyov;p-TKSm|vCnXLj^e;-3i#tP=km z%Y?)~6MD(+|IU8yC|8yEXK0ox@z1f)OZ+n-fmPz42??wc|4c|=mH1~u0;|M73BRW-yf5thEKy^2svA!h!8K-TP z_-ArDRf&Hl7OzVDGXV=#;-B#kR*8Qm%je_g_vd^0tiL{L zv45J`&9=1h&+jI-_&>Fj=BcxF*M!QCV)NBnZ5|4%pz+V2?@Ri8EJk?R7U1*o{$rj_ zOECBKc=vO^f5u$b`R8iiw?-`1h{YP=&3bcw*_~^T^wAvpZ>2W=+02=*H`A;Cq5t>)FU2vK diff --git a/MPK61/Preset.syx b/MPK61/Preset.syx deleted file mode 100644 index c34941f5333ac926c6dc939b55c56faec00d34f2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1039 zcmY+@$4&!55CqWa?j3m2FpD@S&e(TqUh~ zwLgvUJ=)(Vtx(KGZ6JwwmXGxQ8SL(kBY!H_=X zkdI*>BR)objQJS%F_8}SoO(_@r=C;Kspr&l>N)eAdCoj%o-@yx=gf2FIm!6v5hM$8 zAP)+l2uh&bLo;3rnguzK2L(_BB~b2R8GxRlXXqJv>8Ij9L(kBY^jU)p_!HODKYjfE E0fGWj8UO$Q diff --git a/MPK88/Preset.syx b/MPK88/Preset.syx 
deleted file mode 100644 index 4ce9e0948f129700af9169954eb68f351917adb6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1039 zcmY+@$4&!55CqWa?j3E-K1NzLbqYELwD&O-KPiikXGpt<^Gam zp1H^5lxOZU;Xadd-gB?<66eVM#%rFr=g2+BTi$cuvB@*{9=Z4Uz_%yZ^B^PG9kJZGLW&q>BVk04o) z19?yYMNk6e9-8r5&@9M-JSc!7D1mYh%K-EYJwwmXOFtF=8G44Eq|X{;z@NCD{^{fQ E4{5qm6aWAK diff --git a/Push/Preset.syx b/Push/Preset.syx deleted file mode 100644 index 5554320b3096b16e402408c69ba4cf55c4b66c22..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 121030 zcma&vS(XF2t}bRjdo4eD9_pL>rq z-u}9*S+u1;9TxK_>jrYx3~Xa{?q$; z(}R!y8-q_X`1HRq_&9^l|7Gywc*%_~w?~6fi2e^SZ{PRa__E#h;Hyu&{rd7hE$}_4 zyZsk1=Hl(ZY@h!})NN*Ph+t;;`~G;}i&nPRhWTmi^SHkJJNWZ?Xd{F+-{re<`rx{F1y3wvOgSdxA%7QT7!eR&1SnfKW~TQX8T03 zF5JE^xA%LOEAKZKHz8d^&$-)npZ}r%%l-YnZ*Pb7dB43J&hLi#8hWl>^jPq+7d zv%eqqgSr4)19t}O?~e>R?+x~J%%NwqAV>e-KlcX`@rL3aoBQr^!~n$VeFlec%$ikRPWR^?@1fc=D%cU^3ycxD zqzf?c@SX|-4bi`SWY+EVaL5GjiwRn%l)@tx_m~1WE^w>c_2QK+eeQ0T%jVfscVDs= z@!MW{=USxc0xIM2I@}{T9>;+>RQ9qk^S%x?pZwn4Ha9Bcc8PR)o!-}>jeF?12J8*E z^nk4ax#;M-=k4-nwz?NRo8kF*Uw0XBA$PXxqUU*i_nuA3e&1dO+B#oDXs%8<7j1s1Wn+PcwMQXIz?ghuu6LPS;yDC$ASYF1C6^ zK4+De`{DXPl8@W@zPb3%Aa~tA@Z`0*`w@DkqCg!k-s}$spV0kbci3P0qIVPjBloKN z865bTW;EK{b`l4{3iZ72?S6!wuar5~SbC(4ythew4#Sdl!`;{(1lJ|1_RpY&6W6x42UGUULmhV*{!KAQ~HujJ4VnTn+rl} zF&0{RU9Ou`)AKX*e7hFR*Zs)4$hmDvDRjp0ZEWP5tm}quSzzb${YBDj1D(5U-#2u! 
zy*8eoMGx5stKP??d&6zt;f|d4rzb_b@XY3g)FYZbQtKfm|G%<|-dj6F%hLASYj5{6 z^t{L3SH=_dc-+y9eX(B_C0=H5==q8q&4jMa1mE@f^geEZbmMiOoA{rh7r#4QWp=VTR=?l+?OO@n9uH?nKfO(8@ zPc6)!MbBnAZnwA7bYE2&neR+Ip0*{l_TC?->uGo90G-Ll z)BE%~HR@iW=b6E6-8}ysPAsVzM7Fu zmYL7jR})fsn_~S4!%OqEc@JmRsJRs_nXVnZ8iv!_`NaNb^~+h z`NV))nkk;ln>BW4kFm8dYjAaF$&anWR`2uUh2f$OF?oIFT~W(ruH9P{yt1S9<0%4T z{aBxOKI2EM$vbU47d@N#o|#FX*vuJ>on9>7HFN91a$f7l%zXZt`EF)@*Qc z)-Vf?eRtr7>&v7xO)IfePyTk#$^R!knbfU>0Inzh<>> zr415E8N>A*aHU@^lL2IHK2+3W~w)@DOf%sE|+4<6JMO1qAkCyJo3Ih-p8B3tv z-p`3b@b~jtJBG244i>u{ZsC&c_UMCfR;!73=$U;KV{{usB((Aq(8-U$p6xC%nl&y_ zTOu~lXOQ=jriT`}6%NZxMKsUYGogS3J!iXO`?G_NYrAW+H>@T&W{>di58e;u=d&Fy zF}^rVOKIYEP$4tx1l#TSc(N3|bF7w+g>Ej7nb{D;x!5Q)jN%qXYLdu$+rD=2NXbWO zk~3zmkWd05v7OQNW5Yp%z&5CkS+-Y1;NpYubN;4Bb1~|=6PiWOOvjAP1_Wj^p1DYe z%WfBU)WT~QH?zRd^LZOP`+a^T*mNsdVa}Hga$m;lYY`GjpP9oxf-AXYF2V#H#Gi{! zYX^^IJI@St5ko0!SX&;zKT1=4EHej|8er!00qY9moDV#W)8oc$JFo3Nu2?7!6FvwZ z;+aL50m#;}V-~-xzp+vBd4IT;BZGbgF46y*k?9*M+1&>lq33<%MeO?wju>c()FGqo zb|nce+vh1x z4bKX15;q)$9vpgph&24r!~J-niGv}LllS9#xZawcx1r~tOl)I|RZm%HSUMYXDTf{D zEED3m4GrO&^@G(u5PU$mWuPB3!DFf8+k z2tAT(==cZj3!|b#a7X4O86zn47w+JG;BLSBxQ%?y>l<$ImI9&R&aG!tLeGT-aPxi& z?&p&Qiyv=KVP7cx;5qbsikn7kzeH_$ z*~5uhuD47(%ku{AN*~%f0@NV%=UI`8%rg()7FwJ(Ec52b7Xst3hxw z9CRN%mwXmL4ZH`M5W{^Wl2plyA#~jhoD6Y5BJ{# zVaP<0-90$;9GG$6uNegrYhZrcEQj|A<%q8i zSseGF=U_uN=`w1$469Z`5oH~MI|^zB<~w)}v?<5)a1i>k?$Zx84o!9Y(6b1=|MR}c zp~Wo%{xMkEMBcO>+=rgCVv7AHtA?bq^^4mBFOP!7(WnbO?;=cMF2tH)2p(yu`Wy>~ z+&iy0(vgMiMXL&Yxk=fh-hUWCM(nj~whC#)6Zr?j-Y)NFoQ}|T$IMv^21kY!p?e>` zJ66Vndv|*g=nLj}1;?IL0C}T2>N9gk}?nM2IUi#Oz^& zu+v+7nZj892%cbcQsaAWO4PrlciPfEG#H`hY-!C%qq^uCgUYNdj)lt23=Sd6f^i0y z)fIDXtS$+e#6{(8dgjn`X7=V4?xuNY?7SzxctbEB)aBRLio1$Dufg6!B=NcH{oS+? 
zoF)F8nJ=Pdpvo@7LEecj!49#P1|#%bXp`y;R+fbejX~jy(zgfufjRUXOL10C{sLtn7O} zN=)~Pu0=;B4t9vOYq2Adj-lt*?Jk0%l;O1eZ)FF;`E_f}!P2#5-7%Y+SXRe$)qG{MoveZip%8hj*G53GaDv)68_s0P8kU5g+i(- zzHiT5%(rNvuC#RUT{2+!84p$TeVn9|d}(GlD8hONRIf>14XjKDXPk1_3x)PY2VQ~%`R%ZKwYEBio-@Ns%(gB{DTcVrO2Ow& zyZgvGu{jKDpw(Ydc!+|y@#1#i;-xBE*dWNXeg};vlYws%**Z z%FcdfZ$CxhlvlxmqDczi_%GH_aUqH9^Pn#Bd96BL8XZCyK99tidz>vZ`tUF_mwd*u z`B7!n_VzxABH}6n@rkoJlMJ?|jnH#o6fvoQdIsSdStmYX!6((+CPe6pYI8f=u)RlX z?PTuOXAWP4^ksxh?TI=iLU!F6j_cOB+j(aV(AsrhM=C5UMtW{;d!lU3z6UeZU9+BJv(-8D%vShd;3bhbFw0EW z1n(B+;-R2o=mZ02vt^NFSK(I;N>F-z%*?rIZgmDmwzd>bf5 zRME|*l)YgfUGSNim5ZPRxcrwpOkm+0dN$Mc^}OFRyMG?f&7Baz?>-Zh&r=h>V11Yf z7W^Q9@|hh?6x=S_O$aUk$bylXGZTeb7mIvs!(LE0R&Jp=v+h+lJUH_%+HGA769tB7 zAj!1YD!3u_fq^l1JyR|w%${wasE+Qzq30$~MKp!Bnz06FN5kDs8;K$)#DuEe-o=ON z4sYjRNtNDRlDcOu`JAiSp)Bi2tU}>7U<5pubdqb8A#dzi81$OlRo?*~2X*`5!Aq$t zw~8bZ3`zlEB$WN9lW5s9aK?xblBpKJ@0ml- zXAE1iPU0x)IN8Tw2gk!s;&?r{X}2&20~hDfuo(;-iP-RJSKS#HbT$YkO;xdBuEI?&{l<--jTt%XD-ZT z5x4N2LBbYw5${JP()X|Tql*v}^_n;{R%JDqRZ|urDFIqMsfKjhQnc!yGegp~*}?pM z2+3xnh_VFzo-NG9_j^Of?>v&fo%X3@SfJ^u|-wf zc=<6^S+Qo=?b!-xPdgA${5Fw5nY!3@V2t%|*TA?Um?`)Gl3G_*``VOL1Foxs1*qeS z7Q=dXrO!ONa}yFzK2vA6-)eq-h=`f?8rNx}YS-EoGaj}=vBkZ>u}SO+Ui=MYwSPjAPdVDKQzaO8n|9h_MQHJtx-c2fsc%guvkq&3(+Wvk~ zUj6r^x7R{<*=_TEujs`Vq8 z#3X&!gKLM`GyBX;cI-2=DvNz)7H;30x$BXRRsWoP_dYW#lG|2^3DHBIAT)_IB!L@~%DHUKMD9pGRdcg5+ z<_c!C*;Dn;E-j|tm>INK%tckC;vHs=Ef}l*Ih>(D0{Cv7_)Hd-`eh3(xMv4L&jqL1 zvvUPE6bA0>Y~Vh%C|LE+wPHU;@nq&m!2A-*0rzKc$>*qu%nZK3o$W5&SY>ZNgTt>y z1A+Vb1b2!Ma66A^!R_MaY5U?eh+g%FJu--Y+hL1iH}4<_tqBLr_=%IZ{s$zi>Yx?(^kISw?wM? z=ge9^MwP|h=p+}clnR;LG1C@U$CS7BY;lo_y4M6d>Uc9SM?U9A=XuUmkb)$AuGmaw zPF?g!{tjElc)BOB@L6OAJzq++m=%4jV4cd!ZtZ38%$R(24CdNR=EQY>6I5yB0b4D%5K2+%Z)8J9CFi$0Lf z>tj%@`sb|}LhLxkpoHKH)VsKN|HlK#IyDauDJU&+aGwZUC zXf8fvc$8(3iq43$KHYGv`sXmJXtu%?L@Qd(h_L+qn1wUp1cY7AG$9$-q`+2Na%&&y-&l!}vx1kdw=W`H2pd%r=SeqI96n#ne@hxgB2n=StK;o{ANRlNpF{j%*jEa? 
z5srYewB?StfkJV&uIKkTH%%RRV88~$Oc=Fy&^D2*DcFg1n`(ROg}U`w zutH#KKbCnZf+(P{N-p5p*8rM6c&_jI@NAV+7YQ{j=Pv7_pSviBUV>44z_85Nx9q!+ zTul4E%fuL~{<&|t2E@u;9Fx1$cU>gL+%xm(=a%b&-*-9OTX%C}78u9h3u7HKwC66( zDOLZRySn8X@N<{-*7Zgq{}|wPS0*@4J}4!3ZV25s%rE-otTG^cWWv=Hy|%JG9b_orvmw zgm{QP18?Yf$}+|XT=marIBtiKUqt|&2fcBH=roh4Vwm(#}98|JEiuA?-3%ezt?QQ|6-7cVac5b+jwTnj9NEPl6aH@D^>WGJhOXm?ISkRx0u zne~>URsVd;51oa6ef{l%lvv)UYN+a;84;w^`XPV2xCZ5K7mFS@808PHS$8UK+yNN- z^KEw$=~4-XyVXPY;L!6eKP3Gtg-lI;{&s=Eulra9sXCy5T-`t9Zx@T3uYvhIrdriM z6XZ2(^2;~%COnYrYyQb$8%At{vAlHD?=k<6DkD3? z3%MlwLQ&FQ3T%?=s*8sQ-QNvoCUQ*uwWRF2Sb`&_q{^cPBlP@oHo`HksAi#uy_|Kb zvv>Q4T}=qPPrDzZ^_uvZh_|ykiSXl5l4Ab;sR@krDSkFT>~jvNFJQUe&NA&vDu?Re zA$ZPA%+k``A*v+lFE|LD3Z1k9s>#1y=ntn*I7Q+3WAmdwGN& zocoyhYp`!FXF0f;nO1JeZ|Tui>&Gvcm8jYR z37-Xqo|z^12qcjej@d4wwqR5gPjwOFy}+VpvDo8{DO`1YAw4IP zd{Wqk=R{B1J1#A0U7^D;vrNl5IoJsITvUHgbyi|Rp^eN^6dLk~hL!ZaZADF^O#9G~ zc$=}pLZv36QP?&2{a;&5apR!m%SB1aRz=7eya7Ub7Ymb!5BYw@Zb($OqiEKse?^UxUzpr@B^weRW@Z{4nFF~0GCb6K(~`Pjn^R8^T{q*n!!<;;f#8r zs*`i{88phYh0IDkg{?*fA0LbFd`E|3H8qZhC6N?Tm@a2{;>W7k>Prr~uE8%GeAnDGD@+Hb&fF zqv+We6k$otgIdzKNldZ~tiStU$f@d|v3zFjL3I(NF|vscpd32>Y_Wkk5=hQaa)L*M z?f6z1cr}n-zdUCLgI!X?8Ca31Drb;y5G~xxC%lf5dDL7N%53KEeFP^S!vcZVy#1BjE_)CfccysHW!~ypu zWT^a_i!W#Zi5Qi3VR4sHr3AO+U03|piHrJy4_t!LU{Ts7e)%qrKHj%oBxd^qW6Vq8 zZUgCL15u1rylq;P_gHFz_BA-Mf?VKqA7Dkh3`~5tC{Z}Fqr{PyYv%MSiS6?i)DwGN z+9X1Pd)lVjscQ4;M>ym#B@nW$&sL7&RL-K>r8G}f{~SR4mu%&l)n3&F#h}hUHr1{A z=L$EZoWc3DfPe>MJ<3nAbl|_~IaLSA)cJpSR2V#t7GL(vpL6W;A?T+PdvOX9%lg@< zeu&#&8*+P!)Z*^!7`WU0Kh;^!r^_q+O-OJGd3&#Co!UYSPkh$8TdXPfPr-kXm04Id zbK!;gn}1Ht-JE_@2W`jDgg^zS@DUVB0QnNkFD(EX8^IZ%Md>rU^`#?aO}fx(7~yL*z?CmoW^O5@96d3UGd6~STSI3b~ zRsURgP9BVyxW0ZC=!nvhMpjk7@En0@Ywa6Zao0|u{@&$0<)AP@ogSR+&wy7CB$6)7 zk;_f^h+%<(cdsspJyrelCFx+B>R9{!b2`HYC2kP{d$vxg`sZupQlv__g%xAY^0(t+ zI{|;`FgOVT49d>G=$yzSGSv|poO+^b4h6+`X_(92NC8<=eAj5_5?xV9>mvEkE`8y9 znK;GhQ3>E%oKBd3zC%o-VZ|rBz>8(C37QD}IEMRDXuDJG_}n@){O(Zasp_AfTDN>- zLHTB5A-1K 
z0j9vqFGQNe!CT~t1LB6u{XZq@9PZv-Ypc;NT0qQpJ{98OGnQJ*XR_TZeVg1h@Agua zkd;|_D$CRl5djTz*&Au+kd^~r;>ean%E0qwE3f5TWvhv*f2OMq4D=`^ttsG0S*SwT zEy=Kb3=cB*!u@hc4xg&`B)DF<+-XFlA3rl_9LcXp)vIx;0_WGZzmqJt;8AT=|NQ2U zmsIjo6^bb?R)sA7WvhjtI3?LXY(Ml#_O$vok90-%++8ga%H;I#3`zN z;GWi2$(q;@Jom=f7Sx4{u%362$`s5 z&%uh=+V`e(#FeXO=6`I8;Q05ZjExk@@{dh%EQ!vH)mRRXa>E8A^jy0~T7Du_Eg;h} zE#LKd*gAn##&%gP1mrPijiGCHRbE}!tDZUZJX?SsCNb?H-BTdf0^fKhJ0d+B2%0Yf1v^9jd{v7I_qU<4`PE%#8)1DE*Lv29k2 zfOU77J0&kdBN@zRO=NqgY83^6F5$-*ggNHYFo&MQ(QYq9A(?(nt}dwu-$Zi@1B@P| zqrA=%sLYcxwC{pWx8#7u3Z&>sJ|*yL=Ir*875UL=8|%}(jRnVqVExeNG)9wX)jty+ zk96}$^!2|}K- zduxMqQfT5oE`LxeB*!3Pb=WzjDaA3$o3ltpNTV+FOx7*Ixcj{+3{G=VK+S`d$+)q? zGb8EtQU!n)Xt^Z0P{l1th80ITrS!l$AilYUmszu^5i1VLBwadwl3sx?#-8LCZH)gI zgxtvY^~)ga;{Pi6&2Wx!&&bYj5w!GT)!p^^LCr7>CBSP|XGCk(%O5>BCUFwgxMXR`7q7GXP^o-3b4g4XNi9-@;xo-SGApMc6Lqj!)VYxHdsa1H zwJ}d-b|xCOiZSdZV?YI?Jh9|mY%fMoa_ei7ad@l#dA3@DXc0~6 zV?kn!q-SyX62X{~GSO%4eC@RRs!9J0+B2&_=|br53t9ACDsW1e2j>gr8k7a4o-+kD zffrrZ=5)PiTB`W|kEpHwfQSe)#JRangsSSF{~NpkX9Q!_KgS$?{0Ddg;^^+Ge;#;M z_%2o=XcYq`5LMjs!A9triDVCdlc~iPp2Izzqd&-G<|Sp-*<=Bw?x)vBk>_s8}3?aAl5V~4>{RsZ~8MaMD>`!h=MQq5?i@hl6y>k3Y( z>SBh((2nd}uJc&v4%b8!DAHRz$T&kt(W=XJ{#fW?SPr+Jxe1MpLUXQA15Ba@?ZtO* z38+#&H~MCRdsY98c5&?Q#W@PG7Do(syOnkLA+ss#eTdxDLS@gDCX=X(X_;&b=Bj_b zs!IH3R`zqvsvmVl+_tT)7*XOhb}-ewB>2oacDcePCdD=+fQEZOQfjg^TkhGq5S zb6a1PuT$?J%>t!K<)m+iBL;F6ih|C=;Du{#U8qSjhq!7iWp1N+z;bpo~)@<`tzHjeHDRpgPsIX5wrCrX^ySt&{<{XUjV zju?!h*OBQleoFka0K&&wR)9kaRm?kWEYr88Pc+K`8tSlcE_hv81);#}AFH4f`s-wm zn*BwN$#e%+s+*=|_@ZYW`&~`{T1?bu{K{lcfAmUt=Tm{E1F!JX)Q0$YmC8ydWV=M^IWK zt@>y96|?$<-;|ySuCq>UQge6JKdWAUq@YpQlYKAYqD^^oV7i^584|8jG{(k73DYeIE4DUQT zO2zk7bFdwo->GXm*eG*6^4CJsRfXaZ@>)BXD%RqTlvYQhKCdY-_(7WhpTvC+PJH7V zZEE4EBWP1Ek59kYNOD#c}UV2|a&riX%E&b~ZcVAjd~VKYMV5 z*01g5j(%UPTuNV*9vlQI#-kf-uK*Y?MY_bdjAgE$=z3Zg_Tb3p|74t;1OQ^3j4K`e z^JAPO2XfQf&VXR$S}k$8UQMEjsYGI~AJaDyi{)Q4(nBm9{67{WB>JIKzZnu(9fQ(D znO~c*5Xsn8aHm!OoB=uBL${>ZKlJcT0^?g%{d3&^f|1nEdy0_3^+D;x>#F`aQb+T} 
zU-;#j5p4^llc}lt=Y)7t#*CM&fW4F!$(cFvBo#%Io2dTJ2?r(;FA*UK|3j`$mdFa_ ztnAv9s(%igf-gmBp|V7Na3LRSrxN(3pr{9zy#b5_ZS=2A+C$97-{;%whempEi6n43 zIgVR&T+P6oD{?nSs~$PBJb~Y;e{QO)sP_I(x+GmyMq8yyn%5@J+*D^U$74|)yT7Pz z1S75epQ%o^rl{_--NEL^B>tkh;2VqiIg#^TAgV|Ts3@-n)IP5a(@@^Xjetz%{t2KW zEJwi}LwRG#RQ+?vFr?{7O_HFIa_LT@&cN2BLvTwZYS_);R47(dx^v!|9fpbn7KYWP zY{6Xh&!OM?fw>(SD_a^Lg}xE^CV(aw!aReG+|sQLtXqpe7k*P1i{n+c^~|xKnNeI1 zI8E_S7slH9otyZ~&T035)jwlcy01!hUgt^3iOQvr0ppH8SoP1JBwRDB&3JsigQ>3S zJXQU3P^9yJSuU%klxDqO3%s_rs`}@@3zQS81y-9k3!Fqo^?&}a3k1e+chx`JfL{wt zX;J7Uw2?)QZ&mfrS@CB>6TdEZhbk7QRQ)qQx)e>ggY_!2E*T(YoM@D! z>3$NRRsYQF!amG5F^-hyr>{b?ys2UjU5|#X`sdI@SW`s=DKWk7AjureZAs$hiARluuA-Q|Am7)zVNnyyd{$

kGq^TcOzzPmRi6HA}0PEpSX2d~lfjiFf}C(xbkMWHJdd@z&h zy24OD(errm>kOSkRv@P{Hk z7mSr#_0M`DfnyD}%RP`FR}~+>Ym!{m|2e^^q*l!=8Wm8^lAt^8_j9jz`E`P*3F(v^ z{%k<%B39C?aL(D57;Bj050;oR;6wPmIr@zh&L@4g>YsD8+|D@~UyOT7&&=mK_YxBA zhV7>$sVa>1VcOQ$+4^Eyr_#ErQ$@7F;oz5k5)*w$gvZxbig9E1%y!4V%%Mzq{+TUW65eh1Bfvj0TL@zWBOj+x_boFT zb-U|AT{j0iRsUxzkMouDDXWk-J>3~r9M~e+J`Za|UWo#Jb9m4fCMI)2vQq+DnXTY7 z^g9e&Cozy_Qju93o0j$lqaiL+&?Mqy}R@1VqS-47l~H=bI``yEVMDCE}jeai_0g#UiHr( zp~&C-7K*WAf~79-F4NXmOU9vR^(MvH4hM$6i#FKH4MxSEKPB#B35}Vdz~aev@rrOi z=u$E;==z1B4$l(A3mZda5_5&FZwyT`FfJ(UnqpgH}=(z*zF?oXvZSg;z^D~oBkakIsao#7YH6v`c%~S(Dzfx&a3a`K zTx32|N0kO)?WYJ$8=Mjb9)#-{KVk+!m*lhfii|0W8A1w$)|vVIpthjWI5|WA`5fb9 zY1p)D&gfMPR<*-la&ImgY{b~%!&WD&G_lxE-K9)HXIJ!VT~wE<3Ui5xxZudDm+w$FwR%UoMJ&g8DgL2CxqZ%zCe@iPzx(!=7{K$CRD zHNqt?GrG(T|1mJX^~uDz8$hLW(!=8Kj5Ue8Q=1SJlEKvrq8IZE{&Yu>uWyS{>>^Oa zy8E}msYp>t(`_NoRsa021L=}*eS~lyRZ9VXUbKqzXsT;3BHKE!_rru7l2UHe$N8PK zufdM@vtrbWh8}F7UW{sTYqQMW$N6dD3|3ktisMkl{N=M-riXQ6+#fuLp6g2@z>}xA z8jHpH@2dwH>Ehe85qhp&bUUO`O}HJ`kvB~K%97AicV6Pp28=Uqy@^fs*#72 zU+tL-6Y1c>K&@s5#NS;>VJ%Vv0Ge~2)X94YHgIrSc15>1s%9A+kX5q=c$#B{;J9+e zR(Qe%%KDzHZ4Nt|gZ_g`W({JR!J+5$5EU{jAs8z+WskDhkZwbw$r{g9|14j?_D`(c z8so#7O+{PXC7n9EUx8=;n7L+=>&+}mo6+f7;DFLG89na}t_A9!7Etu0XUQnjlmaZf z+5}^7RQ2Q}ryF6lUfW8X+a)Z-1h@vCROXz)qiebZjaUV|QfXCgK%o&sqU;VU25aa?)L zn!O!_b5%HpvrAViAy7}X6Wc&0agOwag_+k1hOj?PnhdYX1cIHdsxVBQb#)a`eTPo( z+dkT}N_z+ajigrBe?5nEC6uIuUk^FuA(^O6wU~LyXAfvsUC^0f!EToU8s>C7FK7snDuaLsG)1%1Zr8Ja9r5r&Rs($8r&P%Go>V zbT(xye0wNY(H&wXQUWvP(*>opm0+v~$(k76#TjKn(ygUuF8Pd#LkA^dQ0gzJi**#JFo&P)J0W7RZ~dj?`jv^?Gl!l%3j=$vA5vcHa5Z}%_H#jv%zJR; zbFL;YsMmFYU`)dJ+D@cwSTug{9D2^+qZu^~PA5F*LTw(^H#9T!T=mb1)%`_RVJIao zL~%#9^SlUzga#NKM1k!kY7C4uMOQ5hbuSCY*$kFakfUH@EW!pxMY-U+Vp8*XYfpnQ zSg+LVjRpi!<+cUIX)u^O_}A>MCaV6qy2HY?hH)~-xn9b^NRrUWHd zGl+=-bFhMRJ;$qv=(5>WP+z*E@U@)+&6b zr48EX_{O>FpII;B7%Lp>kf!ILruCfwZzQRc|DC4vSOgObLXxt6Na3-pYnc%+k?nq@ zD5$r)`acI_rQAKeIFH2iu#EYff=Wi|BBbj7974~d`O*y7&Z+a#3GTsKCXJZt+djS_ 
zBbfzSVd9<6OL>-S!5gO6Af8@KczI?HJ?9X0LRz7cV^TT!tWJG>Z>m;pRsWpZ8AS1D zTb1g<#DQ^Y)J$bCXMv&T8Nh%?S7Zg%1~b8V4eq3Xqs<0;bRcM)Bc+CtW`%2JjJ%aK z*!NSHWSnhT$ZfF`^o36CW`7n82&;8Q%+bIsaDKER^!hVc5`Q2Mu5dph;ZRG zd&27|psIhS^{kPsfWCM!Vizh%euVjEB{Dqc&^$SvBN11^+tW5ofV-eP(TIcuMt7e= z&sl_D&~?WLmbP+?v}l{f(AWq)mn{_dl1FJwbRo$pB^Kj3)8}nf7xK&Qq!+?i80aX<;lD6FeaSMpe)_N)x0}^gq^k zeyq=eBbZjP4nLr|yJU>uK@B*|G^Pk)UF?}>Dmf&?=84sEzh)LGfbQHEin%a#1cD2x za8q@p(x4{Mh#}(?;s8jfCA7{?Q7|18BWiOvQM~Y6_0L3wU)XcivWDJ+HhIf6ILEAK z=FoF#-3-nh%v|hM36EdUeNcq9JfU)y)0mHSsVLNJ+!B#%Eq<$2;iIb*z zgzu~Ts5b@Ad6}7cggP+N^Jh?fFE-j$o#(25MqbWa{TZZOAl6E{9d?~e8ad>(ZkL(T zqYN0&{ny3aNIY!sl|iE0z;K-5_Z_IPTUUdt_cIn4Pm$x#f^u;hSD{^xJpQr1Xs_6! zj$=xX#Dj?2q-w<+_BIXPN=Wt&hMu{iHk=z>2Nt4glg@#s!WDVf;;ZVP^J5HaHXE>7 ze9>{j7A4BM^d*N^@*=t7?6LI|>x?Kk+qV{&dKfeq5vn!NlMkeKYM^@SHLy+OJ`Aod z)Yy-aO=jiUzKj7q101ZW2lyi4)+ts0{O9q%o&d``BtH)xIZUmxDi;{RT*SAzvPko$ zay@Md-4bzL#ju`ZoK*cYA!eqN=PJ>xV%rtmSM^n~0h3K6FfnXQ0Y-ieGG)E98R|0q zfG=7cy!_W;!>WI-Gj=Y*0wEPWArR`1ztp3mcd2n5P?_{F7#>}jf#($bS;B=Q}$B+zO zkBIh+EcQL#1I+qosAFvYs()TT{pZ2m4O`eaco=sAZt0^7f#FyBpLuIitE z2DvgX4@s*OHjUz7(=;X$=kZ(EhZlcjD|@CKtDPudU^x}c2PJZu}Y*!7jQg++=%i=fP4u3^SAa z2iq)GEBa1_hWFTc1p+1~+!l$h`e#x!xAYX{i!@E$UYJT`twHMGUe!MfVC89TYmkgh z&iNL+48ug#N3}(bt{_-(5|frm{w|G_g8ko|BH z7DXzEWd?_y^Tp3k`Q%H&EHl&JlD>`S$mhD9FY}J>*5?^4E;2l*x7iwO6>BD+%PYca zkJ}28j4+QqrE8v@OWd~dW8zJDhDrYB;cUsQnK?08PWfB7#Is`KVXX@mNXfOCq>E}C z5X#g&BZ{JqGvntjbhu zqG%@q`6T{MjnfaCNp76lysY|XPD42aadKY_{=_ZI_m0CW@-eyNgs=e`K-^f^?QkjI1eX=HUZl_3mZzW!J2>WOFJ3LP)Hd zJ~7AmH&`ing_H?nLj^gBf(#PJtEVw`qkt#}3Ot{e(nC81gQUv@ieTMg3_0OR)%`P%xz;SU@f$vk$ z-cq#cpEHa2;b%a)BGzEv$CaLpSoMDn79{Ie!(tX|N2g#8MF~Q6m9l1dK?lEY={+y4C-=Zyogg z8VC~!=JXL)hx$OAr$G-0Faxs-)_@>Ig=>4}(6a&o75|AW@b~*J`m}!nXJcMg{c~mr z)8&E~IiCyogNd|FPIJ{i$I;CNvTU_CKKmwrYAi0i)e)+yf37&4!RmREkyXPZSu}V~ zQl_JqRsWmKgW@%%@AYsH9iJ~ZLfn~*K&}ybKy3FlhvkHtF^g)?H3KIf`SXhHm{-Buzdlz0aNc7euS}Hz|!N#PptNuCI zSdmIbk-_?R6$6W07P$_mRQ)p!aX2q(#Wv%nS;Hz6lWT6OtNQ2p(J{*F4Q1yulC2{s 
zb*J7GMhP6jGw)pdJTq6hzKDF%4;Ve6&d$`V{?AOUnQ1P-T~2Z>kS5Vw_0JUs&x3;C z_A}V&s4#67-ko^QlEiU2zD@=tT_Vny3I+$WZqmNO*Nx}UvleYklO9lBaUQ8ez)%}` zde4N0R{e9Nz8aZ4^t0d$r&wt94g^})*PW~WxkxlW!iq$yo?TGaS&zcbfD!pDz$|CJ zZo4POX5dWnZlziE&ju_s_3f)rESTxf-NC!cLD*VLm}lFanszd&4}~;lrf{O_pMxeU zwb1m98I#akN?cn3Rc2_=w0f;9%!q+~Ff$sKiRT3qDaNZEY&f?$-^RcR+!KGUT+^&R z7dgfC!3^*UGu@!?T;>%^j=jG&!hm0x5oEY-T6|Ug^Y`sZ!g{tqFq7^c`aQ|>EZRtW z8n5Ws^?XP9iBCxeW#NF!OXvL_9Q!%eWFo>I4^74p-X4w@Z5zSoM^dT>m%V{V(}utR zsxZCeo_UYtD0-e8{p$bhaz0Vb`cxP{J1Db9c$=t;YUG*BgOJJv!d@ywqU-lm9nZrA zf@NJBf3EuHmd^=5ZYddqDWq&a{hJ48Pl}}KpJOk#YO5b(lE%ZzKqkJ8>IH_L>r1{+ zy0s~H0_V+hJ$jS05Sn_Vs(;2q8YCvjP7KO+*WYZfdUx@N+}I{-W*Dg-#rx~so$4L+ zbblm=PiJt14~h4=y(6zR*xe>{Py4T#yZZks_-{x#SFyQDd-f_w1SZENt?okp0i z`e%4$awP-yVN#uI3-1A|8}yy4{yCcJt zqB9peG~$c&(sQH-ho1B0K}Rl(>ol=7rzW;_cuxBD1CP?1F!pVBLwJUkwYU{|jnIoK z_S+OMVX(Y?NYtYt7mPurT)hs$}PF4m(Qk!&RQumFz&~t=% z9tas&^Se58B*VzA4#i2p^x&dVlAr?$Oh?ogIpTfwbYh^d1qRD`VgDMmq7Z|L67m+` zne@14WKqgfzc7KQKA6a2n?%FPf$ldZ(n0Zq3DnnY6^EY!`a&5wU6|lDY3adgMoC4W=hL|o&XCuzwB(?G*|EdPVeds$gw@$Ip9vow~ene#%ELpcc zo4JFiBDXWR>TrK%*0(^mcg@^3Ql65qV799DW5evA$od?Z*A6P0oxzzoGd0Z4x)LvI z=9Z!!;M9Zh+WN6)=18s2X6`55a}Q^*nQNw=IaTM6XNoXrfT4|YuAoOn8;c#jkzM{C zeDkq%_A07#&c@93quUkA91pF}eFtJ%)59JddY+sFFr3-dQ>_=4i`Ald7;_H}J=c%D zFs-nXYOHOp!Cc5r%vFYD4es0Oh(OrhJXbV^{#U^#0mDE} zd_b}z4J}B$Rpv1Cy!t-_&Hyc7ggMV^;t&8 zFE&;6@-nt{NDa(wfkW59lORqW>69JhGeul0g;}yLIXbVaS@zX$&)(O}GNOs#0VjUL z@~uQNjV0{wx_Hnfu;QY;#lsY}+dUGO8f=7~KbiE@%1CN^uupJqcNeFB@NCViq`5Na zP^YhZs@O@EdSplX-gM!y>cRyE5K?tp8{&Wf>QQ-PB2Qn{UTk14`JCz%Iz`l0jz+Pt z?TD{6V05}+CGjlkl^JX{x6*vs{^yEH6xl9yTwTxe00KFB89lS|OjU#Y3{(-InsiYe zHbpl_ivpv?n4KW!ATJEDI+81I?uK};!bC!43R0-gsW)+g0!-p?%l$8PWUvPM+CyV$K`apm6Wh~iMxkMi*Kwx z6w>_Y7SgNDgRA~IE6;;7Dgn5Ni4CIYR;jVhKOK-H4ow{N;g^)y;_@1dCkm9kqg8ls zZA$fjE{PAGL(0IiDfyciO)Zs+w)vV>A zBKe>tkz|eJ=|lG?fqp?`&CNQ@g8L1A(5ewAi(%zk#_v`j=8&ez8|KL8?M*~GO<#`` zK{^UW@c=~dq@Fu58g`X=n6-+HaRvGXNrhNSMJ%D-Ge>u8T|MotN2=RyS* zsqb_TE_)*_`zTC;g5ZW-^aXl|psdxKYc|6_qyxXPePEP9W$ 
z;eL4Z{90huKl8>d2F8(DeC_g2xFb3Rr8Zc`ya$J#>qiVLx5y<} zz0ATPCu=={q{5v!bz2)lJjutj+^EDq!CDo{h(y?>|GwX$WYwUwcYK-pXTB#1lhzKv zNe9x%>yT&&tWPGEyJmI$WbIrajaC=LnT+7WM`K>D`ezX4g0i+^Hdq6pM|x3s7~$Ov zYno61u2{D%Ijx93FF_{XJCV`(!K7x~47*oBEG?_%SM^icP%pQXPx^Sau}uNyWdqb| zXEv34k#Ql-T3qa;e&#sWs|MS^_)>KxmBB_-@9H>AStHxA7#VN9gWMwFs}Nw_Nm#~m zJQ2l0JJIa&4CeAzd?Qs}eoWCs-X-XyxK!S@Nz4N39}s5_;nbj@NIJOwCT_aXmLnZA z7(DeOG}n6)#rKrXh`2VM%est`%tmNV{Wyk0sxIfp`R!7H%c_6o(#^t%f-H=h{u$h7 z(xpm(QHtK}hOWiVBj;%JMS&w2SN~^1uuih{k5n*03sH{g1vX;Z0QNJO6L#AR%H&{BCg`%LOV>ZM zgG5~`d7IS}<}OY(xv+q|UdxR%-6X?nfrnX83b=DPyu>ry4d3^Puvgg@HW_ z-J<0L?d zzvDUq(*c}I)jy{K*9LIBiUHQlEg%yI8i`beo-37_85jNLNK)uILNODJYODUw$XWPi zsbgXm>lx!DWUWIu^NQ-%#qI?RpDvL+ZdDDkskJWCMC~RaI4;r z<2+X3NsDb$+7qEaQP?kNh5HWQ2)S4~s5OY8TLJ+s?y>2qk95*|U_p&zAhF z{@GUdfQt18zu86ML1#!Ag>BbW{WDqyn5f0uVp7;5ibC{zzTm>nZe|X}$N`1zs;gh2 zQA~Rc^dkdyoX4tv&Md?LiRd6~@0v53oU^qDBkQW9Oq%jD(+s8qKNIvhpiA|C4!q3Z z#w*JxvjQt1QPGner--uM@=2*D2eqPNT@$4QuOdEiJECQ~E*EoTf!N1PVn=u&j*TXq zvOVvNm7AM<3^pLWK)CdMc zeE{cDc2>{@S=@YaoF>Z;Mu67M!KxVe7n*_*`xe|F?;u;POj`wo=(V6I1R|qmj{WDb zPTys=yXv1ggEohg0_HG5s7&brtXGIqE-hD3%bJjrz5@E%-3BOUCB`7_ITx|^PxMp7cpA$@|Xf_V#gNx7RDXZm* z#0B~c4n1EzR)z2q+DxFL7RvZD__o06M}u9e{`oJAf5Ga5s6xE~;17(Ud8yUYqLXZ5 z!%VUupGek%wH}vH2Df;DiArvF|BXv(O1#(6idb~{sS|F%-GLW|D08WCHX6VDlHYA$pZD{ka|$| z+`d)SKi_l^(45RkIf+LnBq=H7X1k-HBz zLeFYrgd1>uY>TWj@gyv^XSfqy4)QfNLN9l4{g(SnqHmx=EQ`IzD36QO; z_pm|EJ5!LlnZ+sTh1>1x1?mIupM~Xune|~U{Wsh2F~pNN+WFqCV#%nvY|Q*mf5~1=%Wyq-bTD?^Lb53$gR$Zy!wLywg zj%lmmxD0bfOmN}3`ad%)?2Le0QZ@Bg-A!^M=@!y%V2)Hdr&!vT`7f0l$Ug7$Y9+M&_D5LcD_uWfI%L;ciB1UP7|Y z?N7p)P*?5g(OZ?r(j&eFW4U#o7^adjiF}=M4oJ+E9qddhGeM< zq|<~fs{Xm23rI7hgTOJ#&!kjSrk5lrJA5MBRsU>ZLM9&C0%hsO%gS9STfO5oky>Ds zf$N@SqNOT1xxrktYdo{Uwj(!I|K}JdL4M7wp7@5L})&DsadvGnd{%mxK^dQr? 
z)=FZBHdg(!e6q#Z%=N@N5gVkd3Hg;~xWnY+@3&RVGG@bG{D&P zLlZoY*HE3U+~4!v!;r2xY$HD2oNIK&z^Z@FEj_nS@WWB(+$CK-!Z~eJ9oiPH8u|lHtfvEVOoKkR z=CgZ_Ww$`;z7T4B3XFEZt~SQ5Y1?~C>t3gGEA!h2e)9k`oTSnuHT>1rgr6-Cb~@gv z=8jr*J<_F!FtJDFL_yi-pl>KI&3SOJr8YJqpB>8;o`}@5u*!p!-)*RzM-omgMFR!Y zEw?BFaS8Hpxl@g0K{}w+#9#H#ONgT*g$PjQfRkWC1?tO$*!8I>$A8Q$M86GfFfG&0 z8CAvnQQ<8Zb`@8eXiw})CAFRhi*{q8$qqO>?wiYww}FnWb)A;7^! zVpV&zh*D%1YQ3wT?Vpss^9)<1S+s>*}YSR?i&!d7MOdbbR^ANt9R+@Z+v~ z!FP%@1=Jbb7%B$0fSm`Fl1JkY zuKv%qR`(Q#E)<#NPg+9fyxT9b_5U0D%?evAoknS5D0!WB)#B(7X$?hb@CLi-V9Ds=;U-CNh3Xy(=Qt3>Hw3^*nHZK(Udh< zBDg#2WWXdBTO=ARSxLlcMniG}w&Yy}{FBjjbQz?8)ZV-%*Za=Q_Zi zpu{4is(y;i@I9ZI zj?P1BV+qLwRZPqxBv!laS)&Dt_WFef) zN`}Ptu#_**+RCl|&qXX?q~3nEIunHc?d~X`u7Cc_hFjMnvp}s&>f-MFf04JgfnVH_ znZkEz?n!P(YTU<3uli?r&ode;J*LS@`fOA5xG_Dzh_taqtNuCC(pG36lpY6f&j69A zkSn4CrS$0aHcqPlGhQ%@@+P%Z4E)Smy?f}ETu5FvZG@iPHnYUWh<*@GF8nBSO?}jJ z%CPG!I1OCkB^{J1j2wYjpxhG3@c^hk)R_l$Lw_-R2GmTBnEqT0cFKBzQ5iLGq26>X z>P4}f(i|Uy}&$l?xQSMms`=X#UV05Q+^}q%f7%rfb zIaHisRTk`XtU{vER^2wiL5zTrCyO$w^J7#QJCn^rnEE?n;?T*L>i@iNlX&0ub39p7 zC`?)Qb5|+H8z+&<`i*1N?dro^f`+&j2e_0Q2v5e2y=@vA}%TKY5XFYnNJj=d2%(X*H- z3eLl5)~xYOAN`oEGzf{JPdAjG##Lr6Ng8-_VLzCa@-(=<#9NqJ)qKzvH_k9Fl%}Nf z;J;+xEX1r2a}xW@TNcBjW6&UUl9+?pR$vnF~IqaY3Nv>f~B!K7Okio8Wt z+VBoI!3Eo@YrSMQdz@*+j!uL$|;34sEsLv8?G6F%y}p=WLL;Mc@56?|^k z^EvO5YcamtjIY?gSY?)OmLvmGXJ>THE>)6Ax}~$8hnZOnG+@*Pi9r+K0f~SRpjPm^ z0yriz*n<+Dx`;q`vZU(_tl&z_h+C?Gx~+{kros7v5JXd?7?-8!B7xLAr17l7;u11+ zq`^AUlj?#`;;IeWZ3pO|llnJX1xam_S-S2h_=Qm|>yOnFx zq}SlgQpW1ntr<>rw`U5=dI!sb$V{4HXfZ{x1S^v7S4w7dVprgBOER&Yi({~Rh6xs1 zElnkX#Lw!J0+D=Pj$Ky;>cKnF&5EupQ^-K!0u{9Ax$2*T&*(pVD4}m)rWmSYI#~<$y9P>V4DCo#7>v4BN_yhnX)v?zI6(rqYgq{;T zsKKfL$YUjt7Ma73f)&7G-SyzmGk+?ZV2eFsIV^>6hc28&@KC)T9C{um1*_)+EAZ`S zT#1$`Fk~ex>njhykdRo>hMxxp^%`V8Z>pDJzGp6aj^%=Q6Dclum5~eJ7Q6xRymZQ{ zI4XG4Q8UzrTJ$CJ*caOl?E$TgK;^u^h~bo8H?fuSMDT)BZ-cn2a3zq_U@ZB3Ki>JA z%(eKh%d89~s-sZuE>zxwL(dtMyu@}NxoeQfuuECH@pz*VdvNI439B_olr@4T#%t_* zhgs_HPJrgTsW1JPmM9ri#wqMD64qx>cLjU(#%Aux{wETi9j<0uQHzV!V1xQzV8qWa 
zs!PZZ2x_eI74t6ex9ill=oQgYk}2EbH_yOYLl2CgnH`MLX`&GNno)hjXT})YP@#IE z!6z7?j_aM+6|>zPB(@^Y%SBeU)voaees1608%V)kU?^^{S|&BdSr{-Zl&pBJkQ~|;{_eq{ z=lYSf+%tIc0J}SF4FF*RbedASX4tY@)9_HxO5Im_wJ<-4?+lV2e!EbTea*}d0fs?= zRWy*_!>6iLB^t}=Gvc8gv1=lsg*xQE9#TGT2;UXAv(k5za?f0FBbu)PgN=($$hi{xqwUWBOJDB)2N z?hXxItN(Lq;r%GRkp?>PIDtctWeuwGJkFtKHpNHQRY1YaDvk@^sXOtvUXndH^juQ3 zqQXT;y-;e~ZU&^THoR8-vl)m1XRc)pSi^$kXD!h4_rlI7`s)9zF5pC?tsirFp2t;E zK9YhUU4Dv84k!o*jfphQ1P}GA|5}9l&qj4!E|xRcSr4YK0itWOlgzZw_IHHpTJ_H( z@h7ts)e!0nHmv2;EeM=m3mBabOmsEI7ZXVJqHIlHFbb$h!w-Ui5g|e=1_tyP@F9a7 z1MMm}O_t0|qgJ0wU+a3{85C)!4!?PZL%3#m&>Z$eq?#EOkLSZm=`_KRGZ;9{R#pEz zu=!y{iPvrl%q_UP3a%cYQ?*Ba_zZjlWoSEakJ+>8pD}EaDT-@74&-dLNDou3kDTz6 zz#J7i2@x^*MV<&e?X=*^5N0{yFk_1rt(Q zO#P^WD=Q|t8@F6m@+HZ@B?{69+=kc+`Gqw7f$5m=wqAW+&s>xkv&U^C^>f?x9x|vP zg+tfXO4P2ixV~EFYFQG;=n@vmIeT$k0M# zgO*{aY*-c^MXd!c>k_{RvlhBiSIp+8GYXXb=B6oGJ2k8RSrRQ3&xPa8LdHFHwU?tL zO`=u*?3B9i5PgM`$!_szQ5$2KNS<`xoocRC|9s338K5jk|Gj9W+L-*EW7A!>nh3Q8 z3Mf<#B7xs54yh&~02B7iQNy-1A&tbg#0A|&oeEA;68FPhEeT{{g0_)X{WF`H z15d%4DY`9ZQ`|AtwDcbGZZJa6wapogs`wcg_4ftaSzzcH9VS{N!&RAsP@UsT;ls?J z7hLmT)jt={5$Dk-p$DfeV8@@i$nyK~sq|`Q_GKDu<)TL462WS!Q@S(T9f4$zToNMT zl-33Pxs$k1vldwO&$)o#p%^|P$JjqHxiLg^tSI7Va40}-jXu5~t9X$6Z#U&T_-K@- z9!OyAnPYE!4Sv`m+D4L&T!VRnf(2+g z1I?BT5nWx_^&1gA7HF1-J`&7b6*un^r73VrMQtvcXixQj7L1iGO7T(7H+hR_@Wcfn z#B?F_cf%a{Y@jbPD31-OiDd%9C91U0Qm1uPwHS;o{ez)GqyEZlzb={uhMs!{G)Wh( zi|X&WsE?UnyZ%{}1)8#;B25**MH}9IKhLT0BuNwJmVq(wp`ITwNT<+bp8(Z<#o!X& z6;hHQYen?A7!P;BRn>?g{$5bel#2-q`?W3}n5}4m;dbBXO8Mbuu^nU7nAG6>@E>OP zv(`hBFw+HfPKENVV14W^RW>tNvNvzt1N4dv*>f;yQTmV~p3Tf3|b=!(3Dn zG*ACY68EMswH<`2e|AJCg=8@)iKEJ~NpcH-N<`kszwsuj{@Jm=y$hhl799~{63qB% zF6cITs+NL{NKY3S$YkTs=3zbzG{&OCALzO@Y}G#}v%z^+kfZ^M`k@Nzq>hLliVW-= zJ-gk^2~uXJV(8H6pNl&`c&+;9nJF4AGdaDe)NiG)bH~vsRsa0=_67^=7K;@LOaN!~ ze_rf^;JFYV;@9c0H*0Lss(*e+QpcSOU8j}Jp^6?vWttzm$1hjc`o6bV0QYNlSwgdS z5kj?b{a3n(>KKql6jc3lei*ul$O_e^5KOp8XN?~oOuFzhb69COJ;2S(T^lb^olWt> zto$#>5GDe0OCnYM^URPRJ{NuQbSLi^eI2BvVx2zNrW$kB^pQi7bQB1;$o5h#zyEl6bAWX 
zCrb?An-T*N@~|^~YuZ~l&*`)!INuSPYU^&8ErUu zVrH$N>}L9;Nh`GTm4X` zI8k)VW^x`h%4!m=`e&zuh44rpJ=gR8=yyHXZ}~h)uGqISuj@;)LuXBD!ORf{@n54R z#)PQ)=TLBH8H~`r*eW_Ef<51Nd*tit|Ezn|6zwlz8Yr9tcv)R}f zALUbaSF4Wtdqi#mgk-?z z27TA6e~orn zUwv5tVP$OiE_hD4ZA{{JD%-pidRQ~RvcMuu_hLx+y9mk5k|hguB)FF!0?I2fsJ_4? zp9@9-RsD0W{#=lz+!hzAoA-VY&$a5GYu4%}K;UK{ihPzo`xGkZ;!=+(3K%472%do{ zk@$s1--*Kr9XHIDVIG?z$x_=LMi2n-(4{bTeb==;*Q$T!Ir?G6%=2iooXbmaD2o3r zInYPdt`$I_PV|S=0Z(?%PA16bcFA#|3~)?RKu9XD;yoQ3r5FTr%0{9ssgBfeIFZ>f z$KI&ybI)$EDKynRu{vQ3(ZbGbO3|~OakK4-8SlLYL`3AfOf&)Zs(%j5)T@PoS5Qve zEBMwvM>0Pec4n@nVE>ptk!=m9%NBO>M9E1CRQ+?onZq3pX$=^ON(792?uV^twFb5< zVqlyZHM{(PHA$}J!VZl01tXYK6#D^d5*fus*7t$&mee2gc}UY%QS9TU%k=muRsXE+ zlXarOy!U}QbUrXo#X`9P{{}PeRB9zg?5@|A8>{{~@p5N&YgPl;Bvb~rY-~&-qw1eC zb9ga7^76F71w>3*Nzo#*-ju3;PJ*R&j}evKQ^}Y2!iXFbZ*dV))t)9$N_?>$)=Zkl z6-Lslg;Mg|;T~Laq=_f66r8ELn+pLl5~=z>7tw@_vrwv;6j#Omb3vbqx>o&jjYLlD zNvexd_~D}Jvo>-5s(+5lpEJ#($DfPjSM;Jod7@+E4_5v27Z$?$wK;={(DdZ3s{XkM zNeS0q37VsIC7s#`tor913K3#bQoD$0-iMeblV0`D-=gZ@q|+qytNvN5=~sxUBvIID z)%R4~K!W2sda&xBnPtg(6j$Wd-=Zsbnwpa`WJ0C}@j`>bSQSi6ghz}a6>70<{;Gc- zX|E))aH}n4_^*^uP}v85uT}s2&72{67Yox=YDZBMvDR*B)jy{#MG5le!`XVz_$xe5 zKBY=bG^zS1%Mbj54GitHU|}vT!MGl;p|*yxUipG=x$J>}wT6ktcmL<#3z{{AE`T^~ zKvvepYU1iukV&M%pGpcDSmn_}>IV$JXwGQxR~)nkQWwF#YDQ_$hB@|zfvg|wXqg$v z|7KZK+JTAeI8x_RRuO9FAUf?}+g;Gn9$7#2sXcSq8;WLQ>gfNsVg!U?E>I}yiH`VZ zHYLiNrA10K2+XL**wBHQ;d*KXRQ1m(sX6H%e7ie1sUwWbV`b=h!2YuR+qU_u{<+c$ z)R@_m8p*vRcB@peK95AI{?B_Rib}(ki=;{m zVM9zdFTt1;wz()j?gSIn7k}>5N7b}b%vi7{qFEO)19MS6Z*i#88rLygtN!_Wfw-|Q z@*wKED4(awqCz`*qebz2Zks#BlFyx#S+FV^=9K~MA9L7z7RaX+%dZ0D?&Er``sc~e z$k;W!KUtc>d2dS9Kfm>Bvbg zr9&*lX5n7xQK5Q+lCe=Fu?m4&$OOm?bQG-4zvjshu?}99WM&fhYPPGL-^z!p-P&=t7S$O{tx%xMO|l}M5fnQq%yOU zsy`R8?Ya;s%3(RYiEk7b)mLHtJ>PY0e6WQCI%Gj{63)Ta3XJr-;A!n(%1Glmd;Xo< z(^8Zmam5`)eYR0Nn5^rk<;sj0TO{q&zeXJ;OE`$7&S06pB-8XW`C< zyJzBet>`6W3^t(XIk9p_Luz0u7!sX2Rm`Bl2t9`;f(14)0}0XcMTMSIXdH+7&_}ja 
zbs6ld#Jz-$S9ZzYP7(VYVsNhxDX%B6>nW!o7h9h1o<*#hQ#GpyLh`m&V1vRPvoGI+O@!VJ8rYSgbg#}Dis((&?knsM@l+P@U$_pVVxa*li&oRUk$P4AAe@Mq=id++Z z8G1%lfuk}AnZ1HGN)nmb*-93Hzgt^52w<#11g=e7AzJnWtPH+=*X265s(;Sjn#t({ zt_+5Ju!tlz@(4R5&~}}Ap}@S_j9KY2BNEa| zDBSz4O34S$q2~(2GAx27{C@O2&~h5`Y_QZuch?sZ)?luGbg#dhB7uP!%)GaiD$@qf zC7(NNn8*W7TamTUHHXQ@yo6|bQ$o)HB{e+R6u#kyOy&PQ07Ou7Szi?-E)h`m1PpK{m`RF=sZ-baQljjj40&;{q*y;#g;;2tm2*fk(0nDI2Dk zc0+dNNbn4gxa>BilKBFUFY%9QN80Kl?8%eeI5&#&MRUQQ;3y-wI@nQJG_EOz!XbEF z)!+hrn#5Ae%uSm_;*wom^oYemkWyBwq_q*wFGYF1dVR{$GalgSE@p+6MyzUK$y)s? zq;*)@kvF=}#|Nk_AsXr{)^`;l9BJ2Ykq0-|(53k2QyAemq&E=3mk2-Id0%YwYmL7Y z|11TM_^>4Nlo=(-og@ulExUS4LNj91Ov>bcw#j!_eLfTyJcMyVTFQ zZAnyit@{lN;=P>w&%Hr`?$w+yy&)PzwW1@S*!rd>)-4%E0y?tlZ_-&p*tluMk62go&BX8l*GoCuNp!w*3c`@pcdcErQ z-NK>gyikzC4CWoP?oIic-xslPDgN1b{21_wEI7G4%gSoVo|q-KM;k~F;97O7(Rkv^ zw@Hju+c|C&HT1ijzYVq|w^yBV$hKaZfdSXLCxvi7tj5hemPOfbvnNJIjk$T%QW**q zW_5^rF}FPlO>xu(iU}}Xe>GD)%S<w<%$NQ1J4t1W>6 zVPEW}k@Confd%D1-$l;IMLYHld%9{ez2y^I)0dm=uSXw@%L1{)sT|EOv!9(SBsv&I z!}_#7AOY^Jc`%H*px`haH>m)VH{~l86#qO5*jJ#Hf|zDlm=|~Ot$9Ib8tMNVHXsfw(ICwn~QP9F*42(enM^O z(CMh!XGkN5ZOx_UQ`^_sGnRu_Tkn!ABB|I;ITyAGu?$w8#xqRJ#t&YKe>O}tZDz!C zv5PcTopcn#zEfRRP--Sa-05Q4>fglGvmNZwV9`1AGR#>i-^~D(X3+FX1u?>OsInbI znz2-fI~Y+SF#{G;^I_(b|5?qUF6tR{078;wF6kfgG&(H3eui%HKj-nLS#vNb?{<~Q zANwSB!z~=bbg?;~2S;^QOfFa&YTjd9B+JZiao?9OC5Fi2-!1KIKx;(3H2-sIw$Ttl z!LR@G$9x)Nybo{cj$jh_)FA>Wq>CJvTYDNmA0Sx%8aJ zx)^cT3AtZxiUszFYR&(g8l({HV{j-ahCjZTD{S@bS7Q?0mPj;-6FaS?<<tOHSYd1 zF-jKk*;9&tMjHJwg@l#j0Sph`eU}H2>aoOM{Bw~`NK}R&AREn0Y)v4$wLm4g$Bc9F z&lpQNe^qL!9WWO&)#gYdmGo0vIP@$QK92;)FBf7~HcMzw3g0ZxAGc+Ulm9u%mKaa9 zjwMvWXb`b(ftn^O7e#z)&hqXBWZK8`TDeKQF2=(wl$#f2aVh?}h0?mMS!$EFVxoBz zZB&~s|8JgHDFw!?uF_#IjOIA)fLpb`cA zlsHC^g$~Ba!TM@6n5Hx%zQo{i z33wbrM1(ib5FVMoY~z7|vj2siOF$Vy(EiL4)xuK?kw0w2!0(pGFFD}C+fo<44+itGbt7y(y%H@b>S1zF`P*X zHJWw1@D{H89AzLo*plYYC=4WInNmCL_rl@N1__QMT$@v$;M>oruX%jX%CYto|BSkV z=7lF6I*z=gt?ejl?_HxW{+R$1g6z5rEo7W6m}(dKzCricSccAYWBa`r5n^wMs@LDv 
zZ*^%}u3U`B<9(#WhuRfKdpjDTXFsqqPVKpv+UvJB)`pL3Z3=hfv^bLx!hQ-^((KiLF0Yo4;4`_w&u`t9&H&BX^9Qf-gd6P`@VR7 zC(Wj?&V6ZXbS)AnO{rVy*(MlMl#O)w%&i)=DGS6*Mz`~`xIM!5>0tH2+H#@hRCE!z z+d_{WJcTQMrDJX|jWa>g<=l{|<@$}7gvH>`k;-7J2p{?npAW55$)fX$DKafGxax|dg zpEH9lYQ8<2;+tUN%xH;5=NCXIC#(*Vc}Jex(MYD6$RP66#nZxnKOM|q@DUNH1vfXH zjqz5d3QC@nu|BmszZ6PrYYsiGr*e%S_Px9-%!x6dv*gxCe~B@fSt956jhr73gZ@<^)As5vdsY;yhQiUe zqdtrB`=?61QGS==pZ}$thbx_$k0Yb_=c%=f@vhZB7QU>W()`a~GFgc7S+M7us7jMlZYom~StyUbz7M}O0 ze^-gz#1{ctvVeX@r~sJuajfFf|>7X3`O! zLx5dlR#Se2?E=d^aT%lMxIYKhZ`QUvMQ%9e!_TKIs`JwP&n{>q`<#MT#@M*bx)5Y$ z@0PcdGWcgvu*ng^=hk>w+C^>XbA#sX>b~Qjo%3>kbR_URE&5AtII8sEpLMKlWvAP= zM#I0~@Oy>r=6^O$FWHXiaGLwQdK=Ze$l{T{C+`HqGl6y>axSk~4$5 zUq8^s7rA4BZA`t&29UP$MkuWXb-3C@D%c34reJT6U3@Imkib*HLi8P;pk}LL9{bgR z6K>hXdkhQ3Kd1Lj$m~kijI(0y#-~F_gMZG-2owF6@hMMR6|eYb&G_US_O1I(KD%}| zEENBIIM0`x+;VQ(+=iFQ|Lo_~u(Z)3)4Q`CpO0g%_~(a0D2GRLWV}={!?_!M^FN;n zIJJEr^JCvA{`s*s&6zPjA{(+kEO+uhhx6K@4R)OIRp zb9MvjF0qdtg94pmIjJS(f2#GS42?ZSW3~BAh>Q0^`8|5~W>As3UEz78)!YR-wVh7q z+OfC}{yEG3Ye6(3S>)2lSt-Ro|6TBN_Hu1=!3AOyMt^^_8m0JYjoh94wBz9|{<*A` z!MbXly&%EW)h^<@!S6O)^iupYdlmQ@)9tcYJ^p&*ZpVXvj*hamJlxzG_ zBEwv0Wx0hkPK)&$ysB-I6Z#ky=2XnUHHAw#j+DJEX-mEMQDjq$2n3Y3HHV&i{_etD zqEh>lMqBNEi@D_(ev5x@ZJGDf7VMPT9xGVrxO|Gr4Ep1%n*mYWg_MofxfK5_{_`(B zXF~2=YBNj2C9Ao7j(sr<@{d#3v^VSW&L9sVmGL9+do5+N%OLaFHGmE+xOWoQKz@?e|GOK{y7)6_6jM!#9m-D?1}it@gNlc zoC}YIyQ&`wZ_bS@7uxZgUmpCvG0|}%*j5@sexNqXJCjC z&_nO@xi_=Z=6%m@poC^pSVQ+>-&wZ}<19C0BGJ{>94)<%lR~iXi#BAoho>X1(~dR= z`-P(Fg(g#Qt??^&R58o(ost`E&7o(>^yH_CF(gWE#yGTC#jF+Ghc5E-_gcI5)~Mj- zpZ5-yGiJd?dSO&3&xq#9`anpw&Z#AuCwl{*t z(^=gA`rAOKk+r`UpgkdCuoscG3=DU7aPiMG&LoaWQyFvESUR|* z`+GS*PnL(zN#N8Nk|ZzC@!H6UAWlGLi__-Nv*JCNJdSYCv`hpOTo8F^GU=PvoK^gD zL{~R-ij%$19%V10$;lg=FqzFNa2fn_Gl)5T29h(XG}sCNfb#{ovjWOEB1Akb25oZH zsok+QiGs<*A{Tua#bHMy{FzoL)28Hs#_WaF1f4VqthTdOB{fzHPxqP}ET`VQNFy9W zh&YqCd{2yxv0p4PfK!@LSyu{W7xxTaE-G`F6H=|LW#&B83b#NHvGt7`x0DE!Q*I?~ z%IYaWTIp79!8-D>H%TBlrv&C0uH%mL=4V#9l5oX+quLf>fv5yg7^^mBw0 
zuBsGW!m*s1a5A(3-eXr9I$3Y&d;Gb!jU-H`i4sju0cN&);kEX>ca4M;PvOqwAyJFh zk~xWLgV!+3wS5JKgDZVXn2MxQ5kWPhKUL(c(WXKIA5=31n#w2{{mh+Y=a*S(7$w?7T0D;+v{EAu<9Nd?r=Jct=`ZF8Z{@Pio6+ zq=J*9x~5tdev?yM&E__IFaEhhWx(7d#4F9lBqgp3LCJOtqPRmBdVUVtG+ZbSaoU#W zl)+d8Nzivt_m+;rB=89NWn1ERKM>#b9UM2-;)}dX>s``wg~9cNs_{HJlE~w2<&%Y1 zaJ7h0vp(B|LkAUJ$>mUARt(&egdN>$f@0q`hd+l5XC&BF5_`wn0T98!3=xM$-e>Q| zi&C>W-&zE+cvcenv%NxTxj|sn)*Pb4dtiOIATrO)U8cn8v)SUT_Lo}#q0xnar_1nUG-2w?xCd+wM!BHr??iQ9*5e99tRh1R@zFo72Yzv%teBjGuME z<~09se11-!i(Jcl?6Y6b5&p^mrBWgD~ zz-jL}5p$zf9pa#HjHR%QcxWU5`=quaqYpB`im(+?tw91jl(LIdQSfCYs4~*zTyM4g`G+JgvEQNyZGlM^)Vrd==(|fx?t1* zID-VoU@vF4;v4ZIX;k1tj*;igG+iVLPV{No)h^htiu&AOB!#_YIIxvx3a(n`q?n(fhD);Htt1CBplhvs4Ns zh3TMCM=wiArVR0C542Q5BVjqV2d?~ZW9*yNWp&s9ciQh#8$MWxD=dr#9n!6Wuo_Sf zA?tnrfqXpsihhLJFBbj7G03bYb8#~gBcAQJe>1qZ7sYvFUB=7k2R$ahR4Bdl0R9SXPYSgIqeN(DqAgI1}m_hczYH;kzrg%h77uC272q^C4_%x zNobi}BUunCGnlF|v&g5SaL7_8`!1V^TN}IUCd)81J<4NGqMJg|qM}R5s1sc_6&wNU z2viyMj4?UdYO63pH$@}YoTtKq_zPn@f@^k?|I7uy=8@@`kZ0XJyVl8AIHGE(4a&qd&<69zmaTKsc5=(&W8yi>E(xJrXn_x^J9l;WS~+>lun z^rf5*7<1~Z9&~+#z~Y}d#tWj)J}4N7r-f*;8bBJZ0H zx}zjcH({*5kFZ_*vl_BIWkRH7)H%)xV%0=I4*9PwSB+{my&j+m6ftQIh-Rt|;YX{M zS8FYNvYBo z4JhH|L-^QNPe}DI3NZbLHY#rf_@YLsH5?EU$#$M}+P^s#2(hVnI%AJKqw&)AGp%cX zV!U^{L(gH|E^Oy&qbz4?2My((nOFxe5hA+0$%2wgbC|HdbZ}uLbcWPy^em5;5RbK2 zdZw@YY~w_Mha(mL?BoMdVYB?Oe<6WASn-GGPo_LJfN68_&yK^*8r45y&I7~2Rvcd+Uh^9Y{Z$vn#2bAR7%+7Gw;Per)}LwvIZmAYv&tXCQp%Y0`7jB5&nD% z5c+BK4>#oS65lsQ?0xXhTN{C-7Z3k{gU&)QK3lUz*T+6f3SdQ(deywlMqEy`HtT2$ zhn^8FE@6$d_c-RBm zJV$uGodsL`bJ#0csu56G0ExfK6Kn?^zKxZB-~7)ws6r`V#RBo^M%d&hs?{D_qX5^q99Sub_OT^^A-%@(-r2B zx8IsYB?;*+;0ky-HRl8z6c9Dz@MceGQvjULDOB&pKR?zef6%?gepbnX(%+l^`Cm1H zjHk@W7i-mCZ zNo%H?S)!?(^qE;B&0raE4A}fO-s-*h=VP|YIFo?Jnx)14PDES>=zHEBqrTX`_f|&E2WB6B+1@K zqx@NqjmTBO@^;#x4Z#LPA}uBEcHI2W0|6kTuA(qrexuNm2yB(~46|B$DvY)|ZcD`O zEvRsNrWK^0(&%iPNwXPMJDxGS>Pe=IAs&hG2#ucFHXrdY&K2NZVdHv-D*s zFHbzmqdOI*20*JOjk9#8gb{m=--CB$T=;ljOdvd;!cptQX9`85#jbky6mMaqf{fB? 
z^gM=EH_0seUusR^Me>Qno|})@!*$o1DZ`&NeZTqWu*E-ruPIM?3gg;lEU@jpwubLD zottgiRw2S&##sDwYWm&tijczA_E^EjAh0o{&D*I^UdEh^%#fOQV{fXCjFgcxq7=l4 z#V%9@A>xgm0((r!Ii}9RKeJPfo}#h`a*aG1K7EW%2pk8V^n~~6rmWrDLcu|~>jJ-%wf&$bp`X*C`kS=SzZ2o6jCG37IQQ>pL0!Xtt z-?ra3Y1RAWe>Rd?Db&HP6qHeg=4eHmMgM*BKYQe8L8BzZWOG)Hec*0pY0BfrCG>+r zAmUO35(sY90D-Jyjx3>n8phy?oBT>TJAoLqOimQu1!f}Z>PDZvPyXlF)tDezwc{d_ zi<1F^QACVSf4|RQc`TI!Nn&;r;FQ91s%wN@cE}Zb2LG%HlEv|F|M>M)8^T0I6K>9s zq>wW1M3iW4;XaWN0a;3Ivfm~7j2zuF-4(v>!}Rh=ZLt{K5^VS;4eWBH;-ABvIUs86 zXk@S{rbd^rvPxf9glKAyxXwytOvUi+B>Iu1V^=bijsn|egg-MVW}$}3bX(H^9F=xd zF92KDDI9txWRtYBZ(&V>rO}%lh`eE|&+Z9jY@~{i_iLvAUQg|q49TWyoEeb@-N`K3?aJSHiw?s5+fId zL8=`XBhU zW1fwgAfIgvIH!Y0X}McYCjT=8@vxwn`0Ul;WjHBwXDV_a?lvRz+}!1UjGe9{ zkr6Q(>9jL@{JMoh&tE#kA{|-oc*xkI(cu<$dn|u_a)J@tWMo0(Y;iH`1JhY~A7 zNf^w={e%VUFc!A6TNVFI7^#+qyRrXavI=mGNE#Y!$)ESLO%(r}9UAL0dn)IhHZsIH zwgUj_Q0)c{y%+zS3v?GMJk61WK=B=o z*f*G=S|jzYDrs)?3_{aB>Ci^#`8Fy4;_vRtN?{yd9P8Y)0B>g}D;!W7C!mhS9lU1y zJu}MmTIAb|(lb*EuME?TE2j{VHi5v>Rmj;fh0|m@>t~@1TbcWbx^?qGSrq?lj3dwL zrkZSNk2hNmTCz2lo}C?g)4aij!}95XUWDUX##)SO1Xo7 zey9x>vmqnD6pW+3^s#HO znvr9qJ50c%<(iq)a3`+}&K)N+_~(@AMgPqE77i{TENy47#F6&4P;9uZ(La4J&pUcb z@y~r+FNX7_Q0~b4#0MR|QT+2UO2^k7kN*)fC3n(`+xr0_6#uL#Mvfe6o07l`p3Hs{ zYt5X$^t5THN3&%3wwv0sI~JUo5weZ_Szc#P$tI18iJOlF;{AAvRzkc^`i+%0ihs^8 zZYXZU9+J)8F752ArqlqgSrW}o2`4Cu4DT#&M$Ismg>dZ+B;Fb4!9RP{-hjPgTVm>G zE0i_+2~ukLbMem!FN_&I1sQ5l6KBQidm1LFbm0eaiaDLw~bH;NuXRGf;a|XmepAk%dBL^P*#^9fE z<6fYB_uXEU_mD!ixy$Kb_;cEPjhA=w?HTs@+$PxOH}Qq7JdFVTq_)`renIaJ4M!|l zvi-5hy(IFnCin~D-L1^=DyGq$_ib4P8<8=LU!S`o9%{28LJVndhH(>&EBN^re*)+6K{Ieennu{MUYIAfHfFq;LAR&(Q*vyN6w!`Y48?JGG zZ;f<|>c+gi8(!x~Ld|Fm;0DQl#&Rja&fEk+grH(P+h#=GkZ4^S$^1UsYr{vqj@Eg@ z1h;VLIb7)({Nj)(N+wATCGxgeDqFbnvqwpr{g2vXabe^;g*6Hpk&P-75sOKSo+K?7 z_{(ng8zA`()8`7J527=oCGB1)w#LCFIYTkhh6J1236Zd|LPSdIrglduuV9dD9j6(gXFvST zfkBS)Ox6Zc_cI`!Kxx$QN;2CR6;gaZ^1#+Iy6l zP+`#v*uyh1>JWcuKP1X=72+r18ZwR4!Ku~qxgOP{=nh!gXoLdD7(@(PIJUwNa-Lua zkNnKXZIC?fha@t&7_s4zh(b4Kx(ELph~M(LMj=-FJ?as 
zR6-QVDLnR#$s-e=3O7Iyd~C!~Nz>8$8Ak8LKgS&tcVBYyN4CRc(TCnJV(O?JW}6Xu zP66m6>*NM8A*O@Ha`M=dH}nDgQ}UmqY(}E!H7O8p||}w5K#L4UE3ru%4wp1*hK)auxoD+B=ku^r3|Wh8Pq zS~^uDGQpj6V29PLi?fE;Y*_sB4@Vf=d2MS<=-xSBu&mEceym2HjT)|N+B?aMXUrL} z-#m#s#R^(h5Us#K$3-zC00uPf9d0zq73COnP|>tGEzZlJ(mbfmES6S4#rZM4m`V8o z+rd&0>9DdscHuo>EBBtM&d99TQ=>Rp=a)0BvZi#5leUPKh>E>2EO={4*&%oq?Na&nlV3r>baU`^q9OtxgFb$~kmINc+*#>w1p;zV`D zFk;9OeynbY;%!Fwv#$tbV~>S(8UcyU>4Z7G!0fJq0L_=eHr-I$ZNv8|qbwW>W)Yap zWsEXu*XPb?~Q{BPyxSt6psgFV{K5te5CJYt6 zP&+abfMFjS&zAHGpmRT=+Sx_V43?g=^m%ya_(MG;{Mh6%Q*7nw`FW2bP1*`=X0Q42 zYWDCuR0HPo;Z)9dar+FJZ$!4Cx@+;z8Chr{u@%_^vAl$DH;T@=0uH-b6Wp~dkiKK{ zUl9UekddcqX{J&}rW370f=e^vB8LMs3=EA~UTte9#}g@AGlP*JVh_Msq}9b<+(6O} z_B93yHRJsY*NKJFNS0p|L2HugQYD441?UO|2Nr8$-{fB`KCrKpxlz@5Q&YX>G0!ILb=# ze-^0`hzPMWdJ4mpZ!<#A))5+;ctJszT9zH)bCNbHisMe<)D{>p-q8&R2W-?t>~j%4Y~j#z_7Q6Y(}0*( zRTDu881ftQA*}z%qJnFcwRv4o55XJ0T-(XhVJ;{M-?LquNh$tWVXiT=2df=XKF8f% zVsFHPf~DVtlQ?s_mjw&1fwMeKA)m7D>6Fe*XQ|&dhdf@FGR5nkXlM~U}i|89- z;y&KO<7IDWN=2aB8H{G@HEWMkBEb@oOLYKsWXlr5u+Eoj_(XoZ zcj!4FVVYXfiYTjr_LN=SsC!?cP=*DF7DG^K>K;HHQR7Q%5){tW)`>W|Wr zWZ@kwu$j?i!s#G)4%Y;9{C(jO2dzZtgVW-74j<-ZrZ*HC`$c z{rE*eVC+saBEZrLlfY|J@le5(eYQ(QQjEt2QMGfaAR3&o5D?L=IrPki^@E_`T*c&+ zVOSx?XF?*%BPOcK6b`FXT{fJ^aCAVDSPm14WFFx)(#yKf5;*GRN1Q2Ge0CF7;o1va zi9|-KIc;XKO<{~a>SM&SBk~9vo62WGhk#&VzVy-wTb;$$X5(qRjjrkzGjj6d7 z|NI0hl1|4I)$MWAbhV^VCd3WlI8qL5%Thie=$*d)>5SJ@u+H_#ez%L<99hZTwcxC439Pb2sxpOs{W- z?j=sah8MgTZ=Z6!Qnu+ZkHwr^C;zkcC!8r_v3j$kTEIoXDceY<9gWhnAeMkT($Pja zkVjEa23=`>{M~>uUm0gwKOs@A_Zl^2HYD`7E2a2nF&8=OMJ-OBX=oscBBF_efSU1p z)zW$!kXsf9fTXZPSNwAsqHBoKvWo3lfjr4?ZW7}) zGg$mHy6A-HO!P`ymKm|XWXIygz#X=5`hDzO%u7Xv*sy{9eAa2A=VzNemo>-n2V`c7 z4fQx=$xbygH62iF&2GoVKRW}{BB2ZzQieEwW-1526R!7Womd^g!=`Q{`JADX9Jj}4L)EY-(ESZ|N_Bu&<6+2d*?`=)e)&wqMswoSR%#W>L@XrsD`CTf92q}j) z78@~##I)Iu_Fe=}u#PO9U((?}T<($97ue?5az9seXa7urgqgHPjfpKcd}F~LjxksG zNZ}#+f~Hd|=d)yn9+T1A4vu}p&QJEVHQ+4l3gLT*D(lb$*;BW0==nL@#L@x6rjPob zx%C?bSKPuDHmP1NM5v-Tl+jS??wXLMTo2WiKWmp*QGnhN1~pOzl)ToWF{jO;XI)UW 
zF<7w@C21X9eZ|c~NZB)y{T5!y5xwlDIlu4O?$^OT=j2lzv90O4CR}2zC1Ua_P$$H0 zO$Ytz6=XoTrX`fG2bp@Bx4z?R@z3Y6O7`(bmyNPcvnMld&7tRIRc~}}ds^9;z=5A) z5Pu?nDK=L6n*7hu4LK(fEg@6mJXin=$+jO0<>9P zSNwBTRihAmolFAm#U!;}%%)l8G}#?CS4>K*tfq}g%i|4W4N&tm`^+)3L^SlHN5zt+ z_HgR&wQpvRYsAFNY{F5nDgQNijrfx=2s|oR*Jge#{@F>IKaZA~EsbD{E>f~)frAXW z7XM5lIv1d`X&2#sR$$JCjr=wzTt6qFQ0;ig8Q zkiT{ueeHCIo^yA2vy7b$l6z*`#**wnLT5bjn3UVyJf1Mwlh+Bbjwx&5i18_`KT~ri zs%^}pAKRH^uSULsL`N3IV%v+YA;XHvqSJ>5j=)| zn6V=3Gjv*qSv%Z_0pu}sZ;D0!el-)zt=H$AvM79PKuox3zPIM<@{Nw%z2-;+s2Xz0 zBHWvkoncoB0ncPDt3BHTJ0LbN=rWisLpjqO>wwp+yWHe3k%Ue0*luBNojoW0I{BZi zpD#CH!j#0%N>=PKYSWAxz*StTr$%Qs#Kqbhj&>|-KHsf zuds~c7Vg7ibE6H^<|ZbHD6|_;@y}5OT60X1qKQX4{(RzQ6)38^mXg#`gn}otZ8M60&H_m7(S=+rlN3Lv=9!!g8j=u7kwXia9D<5ps0N7X z0>YiA9^XVJ`di`LZjGm~_NMUqj<17%zI)N{GkIr<@lEW>v)AIE)pZoF@H|bK36f=? zH81Am+WgNQi99^)cHB~rHUB(fd@QS|O}>eBlRVZ;2yM+res2EfFEx)|c&|B?Y^=w% z_-B0WOIu;ormY7iwQI2W=fmYy`lUuqY$Zo3nB=qDdv=X4HJ(b2oj)#R;XePa@eJYO zWNhat91Hv*Cf`qu6yP@NcxsA&&gL^i_qC%rX*-pijlTQO|2A|R6_wEIaGq~9F5ftn zoHdwr|Io&NcXSgnTnGQWLltr3rkt068+_drCg4h(E6%s=I zb`#i9o^-Z}2@ifG{*lb@)1HYre53g1EFnI&)^bMROiZML4RMv)L^B?xg4`(Y&6gWD z2#yD_9X!l?rnfVCtpj(cZdK{c|2%7s82_cl)088|7yo<+=FkYS;>%*#bXcOJG=u2= zca0HT0gqDqM!rl8Ac~7U=005V17^VQwQs1JLx9wr5eU@kFPhFyp!PijR4%ChHX2s- zY5Q3zX>zEpwab6=xJ78l4CzL#ib_*fd%HQF{csr$-~8;2i=nzU|8v@0PuO?zL|eIb zY>X8WgowhmYCRn&Nw?y>=r&ACPCNH(N(mHwk^iH+zp||fsQb&!hw~5q`CCoEV%_vp zQI0lVDA%?4=d>%B?FuBvB8bM*gkZKe*_}h={g5>`t9&FHTmF|D&u+E&=cxrgw_98v z{~(cVEZ(*FXX9%>{CgCL^=?!%_ZJh7)kNZ zo=q*$)*F8kv+rpufdf*Qy>X@7;-9~?H4=SUe78LF;+B;~vrBkIA_zl|^gQiXq#<8f zUYic<$~G8c+wC`y%5T$sK?WNbaxMNjEURN}*2*Mto{WTKO1*hwr4vOq>?=2faA*Yg zDUWF`Hy_#<{PWfXSq37?+&T;4=aVJ{k#lAc7ipu*Gq(1Z96<_;B=|`;<-$DfhUS0H znR3x6(u~vooTKPTAN)`frj3+a{PTVNeOocVITb5xubxu;^I3b2w)tFp4>ukqor@!T zN=9U8H$!-i6lXH*&>a=8_~&YxJ9HGQZf%dt?BEKDf2LUdHFMdwPiK$TR{ZnWaCfmz z7Sm@4j^MYQ9512j+6tOskK7sZWK+DGgEEY^u&Mo!Xz|a?CG47(TaK0g*O(s?4S&8W zxcVwZ(G7qH4!|isf<5))$Qxn&oi&fOx@s?5-5?Tf@zn7nWri7e6VU;oMk+D>hmfH 
zd{77nd4oij3tbu7B~tC|4eF5FaxJb*I$k`F-)4_h`;#_~kG}bz@4;AKr#mPY1je_v zv{C%?QWwXaaen)VUfYftHnJqzUFDH6K%4QiWnp}F32eZ^uv<3?oMmuX##sDw2L6#< z9d&xn$d-|AT~Wn9pI-l~qx9pq1xw3fn$cMH73vGy`aUv?Qoa`d{G5duZGLY0tD^@e zGx?vl*Z-}frx}$^POtx4M>kQxwfN`6SbgWq!dvgFds97-ob)f?-ABGGcJa&22Z2KY z91=}j)qPaH-MmLBU9(HZu4AwJ!Up)A_vWO`-(~eD^!$;0#E}2LuAHYaqR?DB-pd@1 z*JJWOuXuBsF#u$d;G_6jzr&y7*PU9Z>PK<}w1Tob_Bv?@R?3ZABLr!;!@>gl+_rxe zi?`Y-!kI32qN6&3SH?4&xop!92^3R}P%V*l#MAkGHz zNIKYNgl7a4rqOT$rL8$MYUxB$z+|AEp1@RjPr2i4HDn4}%FbXHmEsIKc8<4#*#fx8 zDDWD`eiw|*%pl*NuO6K+>Uhp$CvT3R6-2;R3`nn*LD06-u{~<@Dq>+Sb%z*fx`a`PFVv{Nih_yB%E?QPOdLWY_@!px zZ~(Ya8sg@gPj=WOpR~^;UGknO0FtAuTtqBqM3x^ysb$*sQ*ue_Mqwq;EgffDZ7LR8 zu801+M?)2^YAob2l!j|)gOO5Uu4lL}TVLS8U6iz?955TditqQDL(f4sdUA4cmHlQU zWV>WmGX1384H^PaQdqY&-NYxxN(4+I-C1Wd#@Cxf#(SI@h)9~Da3E309acuVyVlCj zbfFrFI@)a_z_5@gjas{VF%yxvV^11l5}z{Gcs?41Q(M@8&M@S}@iI@M^PvU?=reSo zX9y*&vGdY|NQhY`o`$J=qMi40JvEo-rUGI1r(pJ%c*X)bk-LGoFq2XW1r)~< z9i9chA?>OJ0Lgpe1obVP(tbc9LiD_6IoXc0S<1B%V(`@22V=bR+B7_dgd!mpD|!c4 zR~%k>%4Vt|r6?=*SwCm7Hj1mzfzL&|_>FA0xfkw&+d9)b?-w9g)Q1i<1K?uYT>jiB zcd&)$x)g+tFcpnnYLNu?IuoA)l<6}CUh+DP^U6VE{O!Z=z0XS)Bk8@WNVwzxC%zYMY_b_En`a`V@H6#txnGVmC@h(?M+45Rx-eaZqp z94Tfi13h+q$r{Yj6B}M;5!Zp&3Yg+P&f(7nAB~nwk;=TM##709YO*?zH-CW(_BOD# zxP`UJO-&q-W*vGi{@Lau^gtE?l+w}o{O&Pw7#!LNJ!gnp$hg=v#ii&VK$p#D?ENG2 z&l)iS9p8xZ3101uNhpy_A_<}$Z>fCH>7ZXHYmouVzLJ*^L#b{%-kbUlSgcp*=n0z5 zQ9wCW0J&;g5+0Pv|K63-ecnD;Jj@qn&h30Z#AH{u&7`|k+st5QXUcZZ^(Xy<`2j>r z8i-l20lA>%_gj9n*C&{_`QMBNQOqJvn|3sce@;vsfhC)L@oA7%h5(2H6-ujm*=(!8 zU~1-@a6Xdjq)gATqfUjOZ48$7!=04kpW88aWtYtjS-4LFjRKmmwJm?1{Lc@2UuXhh z1q9@MR?QwSlJi|$9^RJFuqHzZCdsb`N)rHr*03PNN8ym7NpL_<3JJiGdxP|51>3g; zBprG#{@IyKE4GR@e)#~|x+z?=wy z6`bh~J=+el=AMJDh`vr>?y4q%!T+n`6kdA9GfCs1A0mGssT~%dT)QGA#PW)X>Q zD8Z-%s3*k2<~JQI{&~lFPToMOTvl^l?28e}#GDSchtx?mYchkCtnry*^iMd-@R6lg z*q;2)^ydiRNd^Yj+5&ERdrg*-XDQ28$}Rp`8byq)w46&2(xao8S(4;ES|_+JPs4*+ zC_6}JDRGema7go9dpaEqJ+~maHU%Tqd^q7r_Rqsbr7YXg2t7mcI;$;G^}W5eIvt}h 
zJH&xNxrIZ|nJpQzuDO7NGFQX&cznL+^Ljmi{uMS`y$}sE7lW)eB*fy6xcFb!5Bm}0 zPupp-wTP-uLxkjtZa70|?QB8@P%wovmLDH6a{u`KL8Zei*kza^_NJp95B@nbmoYRm zW{(8#c?n#IolrQ$Z97U(IeG?eG5=;cZXQPlK zXy#^?+9jmopTF1iDoOS;!(&7ZTm!9d&Ed}#C$qbLtG#jwxnlv|AH_dsEHlj}{Oyvn z{@Qj_y=`CU_($>2rz2T=Y43S_pJ>94R51AGEs%?%nA@DD^!h{cVcXqoe$tO+cQX>sI z=Tu5YEhT|>56a0LXSXW;`OnplIhUD#zh`o35|Vpnqrz5jpcNsS|2a9}lBD*raCSdd zpcY=sHXnsH+oAs|Y`JUt6kZSSN8%7;k^3x6*uK6}^q0@2Sx3s{iO4IcMK#@~CNbUj zn{#cG8+2T1!A#&38VANGJ;@|QL%*f!fVC=QIOE1V4eIU+H`L*!@j zf`}-b^3NxI{~AwC<4X+Ev=3w(d1EdC@pPQ##`V#E6#tww)Y-;J00Dw@#bl>Vn?JPq ztij@+BW-0oBMp2Y96~lp2p9dCQx=|pq=#=z{$~xz|FK+nC(Y30O;GmRp4zi(u=wZ4 zH7ER#GdvPlR6P5kJ#n2}@TRFSuuoL2a{!;ro4)Jx#EB7Vk8!W~XN5y~)n6&*{BNmo z5{yVycU_3S9GZ&daVhYFw3R2YW$ENdWCkCvm=2bOt2F{&YtDfAUXd!bXJ7xZJ1niC zlpBGliGCuD0!}1pY0=XOsNJI*KZ<|m5#T?^WJnrXSOZ8rO)>f=eeW9BC51_wV|XZB zZ^t`>Zqbsa#H3QtGtlj5bh}ExP_{AgKLT!=TLIPOv)(fVu3Z$Rc?ASOqvUEJE|51cbU{6W=(;5n z5VqzHbo{*0le3+XiSX&ZXPmQ-p6MR^b2JBq(?O?6rkiHAsX!{7FAUr1LRyi_4jd2z zJzh&oUG*al+wCZn+YA@15ADoyTDx5gI&x~@ZL}hG<0C)wgypq`Lo_R3ho199|FMs6 z%`U6dst#zE2%n7-K9zJ}oHC?_@|fTT|6HoW|I;dkP*&7%TnFsf7T&nL#I+Pk6y-G z@-2`l8xZ!pATeoGTxUu*3v(d_?YOZcv7HQe-Gn*L#8#LtsR1~=EH9ayLEW2bk~(Go zZ4qUT&{Q~6k)B`BLu$7CbHW3zq%)TvcSlC?&qy;Q%&9Nv#$TqIO2l$7v$%)oeH8y3 zH}x@3bRq;UfRyO2DUIoN+1KQMrb@>uoF-gCRPm=o2amkxx3gOn|17+VPr(f?rn?T( z5IZ`#T=RVomij3ES(GoshQbNH$qnD&$Wj)A@SXTnC?2#G*F_-u1(r3W#rZ8Jj8WafXiSw6V7`B=7# ze?Dzy9Q)d48(eDN*rku+pPi@sABP6D%>k%Ho=AtwO);O%n)p%t^Yk1MsRfJnzD9O- zeBsRG$!&A!+2A|)HiA=KBiPt*(9mvI`RU-$Gpe)J#zr1NcJyNapE8}JcNhQsr$O&( ziOUa1jg#Q&FE>R-I;uzU&sJxa6LH^ad7(EkMT{Mc4GeH*u@E1{KTGC`6Vldb6|yT< zKMC)sb3;q^Oo2EWGAle*a#+m8v^g8k=bm^UXIqc#0IV%^!sI8=9Ilz#MZ2uz)jF9F z=)tKuLH7wnO+tg@ppC+gpP;Km3}&i0eeM1i8Yyx zAO%~a&9aqP%x51&04+@rFe1sR5ThxDNP>`9keOj|@vqf;WBGC;f1M`?RhnBk~ z|I+SyA2BXX7OZJmE-NVhSr58T;+!#BFy}Jm$6&`ICXtSSy`ksgpR*3Bf?!eTL39;v zq*l{K6(f*dH>|)JN(%BUKNhedQyL4vsM+FW!$lpprLeP;*|cgo66Mr5`r;EMH>pi8KLO+T7^U0YfVMR3Q_!X74;NZ-K`@glJn@=C8!$fSzLpFaFuW)GOCAwN?sIz_Lo7h+(})7A^i+YzvW^ 
zf&DG=pMhC~SvvYb;yqk+@z0Y}v%NrScg+FbKt5~5(V4tGe z@k&pKQKW#_6RkO$5jzK77QH2oE(c^Dr)}}iKhX0z(Kj1>#Rlh{M%K$5Y<$W`@z2M} zlECi_m&^#JeLqu6k0r49=O_U!1jyN;%MI~^*4Rb$T)SxT&yt-|$ii>5SvzS#cp%k7 z`~AV24J#=AIc;Q8zPB;lGaHYanPL9e6{c`k6|;(AZ7!B%2K&WCl8m6OnbYcCv*qi4 z`v+57N8vqb^TZf zHUvpv$ZUpk6wZ(dQ)84gN-Pw|SY7<{kwayX;OH`Ji0MuKjq{X^0F@1NN8>!wF!m|j z!}q5g=S%U=9)Ex2{VAFLeabPgcR&6OjCUUS*s=4Jqr(>e%$t9Iq{sjDlnv)+NY-XP zn)@SvF&~!VcIedpdVFosi%AbLZGKXk>Ad!Av6a?00u+!eHRnW=2TJ>1^U()Wach2g zsM!wnwdUob#XmD<{Eszn@a#wN&uKHG+rCliTw%O=y0UspQA%qdl!fn z4%P~>arITMi)it@(pPM4_B@uPumRE+-hd}hL`lSAqjcYn=^WV)!Y{Ia%eWQDoMXmd z;x&!{>-Qs^4F36hAwp|168`ZXop6!WAH_eXW|9k-Dw8FNDJU+RJRtyY=2F_sOCOW} zxsR5-IXGnc2Ll5ImfqLj8PeCBHTdUkFP^alp5I|0vvb)VamZC3D{^_9XA9zUVLrE` zX$m@v0dbhQ+YL@!U2tQ#ymwMw9xswSzvc5$J&J#Zew%4+7ZGl7(eYStk0R1Tb`E)Ra=kt#q&k%}( zb3v%v!N||;m_1Tfj$jB4^)sDoKIga1=7-wT3RYaFkzuFX~EK*0~OG-7=Wgh zbA$Zt4#*(S(Z9 ztC-3IUqYQ_vd(y)!fgg&Zj*9HFQKOdkrmIVy*Y(vnwjo>PV+Vht|1yg-=CvlAy_Lv zbEMgQ=%G9pcrOPfudHph(gkr^V!IVD)gg>s9;2N9*;SNL(c>p zkWv^ev}?}K^v@yGZe4&koTb7$zHDh~H8^;9YJUQrdUfo!b+khldPaH9%)G^7{MWn4 zY>t^N9C}XYvI!l&QN87r4O#>9sExOhaWLNuUT(lVBdmUYk1Z_oJFFnyZ7R1n^A?_lUITU(Aq-VRbx83L_qzxTBwglu6``27Cm9bb#&EBEMhuoRjf z;)Us-c8!I^dm?d^*Z`bc%($$w-QoNga3s~n3?kO&(RhsK$o#j^YxYx}jP|VkE$+LQ zV-c(=2X~%kgq{iTaT$1IT7#&>3bb_oiG}9W9D23_@Gph=FT{s2A=?7EPC=`knHI43 zb}&`Ml+VEU^5cRt(LHZAa9#Ga7=h`B!(?to)Ev>36nzDYfwq`ifR;s_*{*Vw{AqLO z*}+{>&OE<9vqe0=Jp)cZUwLYw$B}EGU8iscBDv&(?!^oe6b}l)xbng1ViN)KXAavo zhn`1*zw{YnBl`agi`})cNj;mZ=-(RnW9WH){PJ(;VE)dDsJA$q^5gga zzTCIM_O0NpFIWEYW#PdRDgNW)_n$T%wnFarYkz+qlLcb=`_f;(tc#>h`M)lH|6vs| z`G>w=`N#LOrxc9ny4Kh#rnf!^?xD~xy`^i?zQ40_s=hv z#rE8ae}1^|aKp>}X`8n9cJe z=LJ^fd!ZlqCLMWW@Xu8Ihe8kQ4+C}c=5f6h|2)ljewga#d##$mRX)?*{Li1YK2u@u zZM_rILYi;IKVt(Aqw(WmG=6k6HfQxMu=>2x>xUblh(DjeIk(LSe}1@drw2E;bE#+* zUdXA`3k>^k!^6lt_xB^$<-pOrG{bgtf3$7q^YioObef~09xgnLhMj6}*c7<8Sew0= zd+s$oKRn^NB(PrTTWypUs9l*;j$g?iRuTkm+*7gP|Ae@j(OCR*0Nxds@|gBHY%Q;d z7#k@d_f~&09{WDc^5voQ<{G+1%A41h(hntFT^C6WxW0ZV>ARHR^XmJaJbN2=-83zi 
zzua~1Sm$`d%i4**O6G?xeP%FY^uPezE|D48+1W&>1v?u*-?j0!Wdc&-KJ`}o^E|ht(8eg) zck|Nsdz(Yge(8^9ao%gg@Z%^O{7AE`ihnjdE#$uY>(EM8(#BieihoXtFNB;Q9!f7< z=T`i4u6-^2P|~H1)&W}iyJUX(QhJRcx8{F#@k=Ydlyq@XaJQGx^Ousof34Rnoayf9 z|5e%z2}6$iDB({lnHNYDh8IX`fW-m5!X8Vj=d((JK0P~Z@y|i>1}YgmXaJ;nP$nyg z$lxs(enw&NV|+=RHQAP|iY=^A*i|U)s2)*(hIe}Gpk%zeDWM_dp(#AZy<3`juYn8Z zIX9%;!C9txY0_q=ywxS-x3)K+$h18-rRKCZH>``~*8I;Ylp4O@WaY}Y?a9l`2mhR5 zN-c&jH&tTUf@$aPmA?9vF9k!B4+V2=&kmOK-nDy;X~(yMovhtqi+}E;-Syu_V{?>A zBCoRV9s52SXTcW#{J)LH=5)Fh|6Bt4A4X#llDD+~>#+S}G}f(iEB^U^8;wmOaV!4$ zjK+ws>2j*~>nMOSDh%Hk{PQiSqLxr7pL`$Vm9H=rtE3nIoFC@Tzs#sH0L;6x^z>sW z8@8DvyJBlLA~CFg{+9=7&^Yvy!#ppA?=N*S|0d*peqsmP1zTLN)GFN&vLy)ojCrOn z%_kenG_$Or_~&yek0_1$H(Ns1Ul{n27>j?-stAYtBF99A@uUpm0^5=<+Yf4q{1Ztf z#vAs|$K4;R(WEb7ninqexLh=W55yl6t!CsT31{1G;Y^**)i6x$3}}bZbm*`D>eK>aZ^b_cWf-#1NdRVl zno#bf)0v1PZxsKWY0Maw#d=bPC<;vRIwt0bJ>%su3Q@qLS0|J_WQCyzVI0#PxWTqL zq!rMq<)09j5JLCcY-8D-vgvFHRozw$j(#)&39!k=7!I$(*)}rZOQOX;m)EH^ylkMI z-gARW!dMP%6#pFNQJ$vuu&6BgSd0VOO)lb1io%uPaj~8TeiU?UK(CxN!A-;$lPvsp zxanZ=&$%NX*kNN;?4FrnioBF5K6-bq zFrn@R((va)z?mD}5Tv~y)EZo6kS{?&1{WHXUch&OBOY}reSz}y=W$7p8r+}Cw9}s( zgXh_khPZ5UL9M|W1ws^nlD7bQLI3=D!hM`ui`oLy3a*PspXWyHX^PsD>21T29JP>U zs5x-BfbD|LOFGj)bAq@Dkmh|?O7YJD%LD@xXiE?)?uAMBmz&&t?4p^tWZdN<@#jl0 z8Zp-fLlB#8z!JysTl{kX5y3eGz7SMGDu`z^=HU1-7q*(53Vu*)T#)fB#$g!3je|BW z>lo=imNwk)2en2sj5HmWuX}i!;}48qZv%cs z0Vo_-UOaS3JQROgnpQDBwW{dY7>j?7^D6pDV1dz3sVBSOU1jC zMCxgoXi9$=j+G8@HpD;pXRL~WG|4iulW^4fQ!9gd4iEr>-@=-S75P!ESV58k#puqk z?az&f+l^aTe-MKo!{6es@&xND=YJI{pX2}=(M<7GHg>oe?qXt#(J{8fSSn)&&1fBA zyZGlod!mSDtB!HE?Z|I#$Qs)W-nYR&tFcR3!IGRu&&yeP{`)v>&hNqDCxOJgn7~(e z8?pWAZI3>`4gT2;@c_X5ErhMT5>g&0X%qhM;v^s<+0;nQ1KE#NE+!Wvsbe^M7_7JUA2&%{9isO#_W4t%yrMMaB6Y$wGuxUIdwbNwy~ zd&jx>=jqJ+J6(E$T2DhVkVa(%s<5|q(DtB4AV32)1F2}c3hs7l*3=eHA*Lxe@KY!x zy+x_R=)4?WSNt<-X$ODFYG@F7D%>x@RH^~t1aujCp8U^hTNPT(oF#ukq;ijzW|Deg z3zwcz?Q}0*FzJ{6U^#Lb`3$xjfA8>k({sJ@*W?LLFM?yY%?J!%{>}7; zT%9er#Ogg=ey{)Ktij@+c^F;z{cN*u5?Z7DQvyWGS{}3n`Jd4!J#%mI3lSM!-<}=b 
z*%a9v*SyUiAL4_L5w+eufWbdpbWZy7@1NISkac2l1iav@#^Qa$`HN4nu(JZ+7q&w^mR`F89`lmEG%QWFUkf3I=ANjOg)JmVaCw%?sg@LylP zp+jQdDI9ukje$!9Q2lsgzPtIX?!iBQ)|*6YU+dPnG0`;p0zAfnSR{ZnBC9z^(zDe#Ms+ndC{(0(|BBf`|>R^hun)>a|pjR3LZuWnV`=b^WDuG5_lW@^W){mCZz(+ z+N6TF;-9}f`g0tn3LRb;HeY?3@z|-a0|yw89dX+ndj2w8zVf1f`R{DU#Xo<&^!K;R z5`Xc}y=;S&f7Xd!0Y1MH9&w|b+njZDhA#B{cU@n9Im@c}=dYJOzok2QZL+&t@y`i6 zg$9u#f%82HPi)wds`m3QO6|+8l;WSKupenv3X@N9r5B(7@ZG<;b@##IpL6&82Z`{) zy$}1C>}eu+;^@b-XPk$(-V3+(7ez5?{}S1gndRuP5$Wl#Y2`5AcIeBLaI5VM7XSR& z|9^jaS}C{q=YJRZ{B^^=Zc!lq*GXA`x09s)hr(%pUT-U$4E{Omp}O>3v=YC(jthJ7 z&s+Em$SItm^4jpaTk+4nPtB*$dEcAQN*Vm~DcqHis#2d<7w%T3L(4Z7 z8nT6VIPQgKUx1a_X3QjQ;ce@^a2K7?SbcDun%j(1=)7Z!eH}ttkI~e82IF41TbZux zbPxV{D){T)bJKzz!N>k8^7%udTfQEFs)nRTz*UMm!6#u;hxy~cIYxmxtJ~zQKR}h-r9drrESg8YxfwJ+sK!6s8?chD`kS9 zm`nXpMfzr9&p9=>;-6!mi+=41P+s=be)o;Z*V6iRR`=kafxF~9fGKlZw$4N?!-XT< zM8b&*+BRpB6p22czx>XLI zZ)~A@FIOp$SWb{i7_TPqEWO(&rh~;lYw2%-M-C9Cisl;$^k*8?ekaQQO zHviJy{L88@NsSat9DiH5lByiF(w(XCO$sJDF%X)q>XN&jE)=`9P{j;`Yy5nNGAD;# zWDHJ0!Jbl)($X@Q;zhUS(DTn2#u0%8?8%AilQH`J5@i@TPZFlnjI2?EB$)v$qPoSA z2{lC~>;){vdFTeK)FAlBlrP^!yZ7)B3^@ETu?_oFuyPYJM*hnpab~?r{aN zWM!>E5azHhJ~=W2v4H$Ar%MiThkyNb_y3z8hK1%JMAR+VF2?<|(Mw3<|NTGz-~SJl C+JI#M diff --git a/Push/Setup.syx b/Push/Setup.syx deleted file mode 100644 index 46b5814f2b911de0d8d9feb0cff13da8255499b7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8 Pcmez1&LCRE!0;Ua450$H diff --git a/Push2/firmware/app_push2_stable_1.0.71.upgrade b/Push2/firmware/app_push2_stable_1.0.71.upgrade deleted file mode 100644 index 231a0a36ffad30ba27c41f066b3e91265b8ad63f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 62720 zcmcG%3wTpi)<1lbl#r98*-2B5CDf3%fF&)pP-rlea@qnG3K&2uB7>z6rvz}2i(p8Q_nCw z{w+;pn10XEFvA=xW*F&zhP$M4$qWjR3(fn?44L^2N^lIR z%4NtLl|Yi@G**DvAr(WEaPv#`_x^YTq>!F46rlg1pH-@ zX5R?k=f0V~dPjoeq+=g?w#>|M3YI78U?OK4Xt}iATxc~Z_S+`dp0O!yspeMgB<)jL 
zrB>&nw3TP*Tti(H>!A#W@CKm|L+*6bG*DGiW?iabNLj8bnIVFqmt5w?CpStNqRZuz z&C*a%WtY?{Tmh8H&Q)x_3Tbi4)zB~Kb;h8pWlBj;(Y!(~qhj3QLN0BkKvE4A{a%@* zM<{J#xlzJxWS>}{vvRY3N7jd7o=Ca9inNu|i)ky%t-Ap|oVy7W=+XKr!-?cQ(q3|f zJDU8UR3Nu`v=^lw(?x+e3~4KO+H01*0LoKxEA)N4wxgyOb>_N5!6w(=59wU}k$2L4 z^22ER?su12n^7k8KejJbN=x-c&ny@FgN7n*i7VC0kXPK>txaseYSQy0GwWn!LS=pB zO#kQp5&mZX@(9C@h;;a7_!@kAU&vAJ*yc!O8pVYM<~zA}7Ph=@qW*Rx_JjXl}R3Yy({rwCl8TZ5lfOl#Jh*#`bsRx^6*O zv2(^;-l{c9#8Er#4uC?&Z3;@n_IA7V`&0Z!v)uLpY_*?`^yr{%&UiX|l3lHI=D#ZhlR5{mw7pB+Dd;sCs(0VnRQT4P z!y{ekM*cg?r8Gr?Ypr&?R-;W3X?;#b{ae_+E{43I5?qC^r{|}*TCFMQp}!(;JN&0q z<<_PcRknQIddMi{ZltVOdrt zDidM2G=05>rZ8lH>T1r{s-{V0i<4Zd@_~!xttR!9d|l=GN?YX-|8W0q|5E>tk%UOd zSMJ;9OY;RBg^nuB86}&(MOr$%RN^UnlH!}DT?1?qv>h{C6&;NQfSh*7nO{{|SQ)5H z^KbK)`$K*`=3taJogPtr%Z4Rt^!a@B;*s1FioF%z^Ol9x?e^eFeT6pOlrN zmc*0kTGb{K=hx4w7T1VBYx8UT1^!mQ$-g5;lZ2~KNI+$eNY7Oo6GWP$d^mv}g#H`B zrJ*14RJ-BR^?Ozo<-fGe38;TTtUrh?M|dN`za9fAxG2M!6;-d4*psW7($rGQm+8E# zRgKtHm8Z?Wxz7tD9R9`f2dA(5g&r=uc$1Ze@dTp?)I-??h zy{cx}q){15U2p;NA1-65@|}#Oif;W%_p-(a!({JgmDdS|H8Rpi1`k#2Gnl9GZ98t~V%`{!kptuuC$@!#z3RgmKYc6Xl|ushTd zIa2!V6-!q)x#j*mTStW~LV8J8->%WB4$|cACJ&cflbJAgr1B0jQYB?Oz%hBvXXNgn zjQxxp3Nq|4(h=lUOI44C7*5JbI2p%yq#mqR4^t{Fm6Xa#Ws$ZqS0L439y6vGa4iBvVOCA#M;$lYo*TQ*(K64HH7!k-HW+)<9~a`7gz%Xu_Jc+&qpC-k83Y z6Lf`BnRr@eEw@HjI(3^yP)NB=_Gry$T8fzFjKR9GTtN&2r9)xtC9fNtq9nE?cfmk4 zb9cY_+)?av6-mA14N%le7DJxNGNdbd$wSIsa@Md0EjXu)w&$DjkHwZPR-Wny-e>q$ zuVCMrhPD{~q%WFwx-%5)N;;bKQzwPkIMCA5OKvqRP&a6LiBY+2F!)lgkSsmCn-iq6 z8~9r;D`nSkL^r?m(#-Ft`8zMK5y<}1%XtF%Cu$xxJStKxs0DJtSl!Gks=X7rr*#|I zr_tKvu!=j1oC!v^Lh5<#0n2%pSI)V0(t8AQkLnYYOGNHajT?Ni4CMyq4kZKV0UZd1gVsHM=a$X%2NFcYc z=%@y=6-QG}1p{O~w@EevDPA#UJg3-dIe$R>b$PpjQ5fU1oCviVO!}s*0IBEnvc`OE z&^I-VwxPH6t?)CIQcoKDYo0C{D&=12v=M!K4aWb*{Y2`hvenfT&0WW*vXg9!WQE3w zyn%fRdyka+ljJ?uZJv)Gu$(uB2Y9|bw69JD8736m$E!>d&r|#a{vPCgqLsfqeU^Hw z<@8tF>W!^!)!$i8PixCYyx^S?ZIX{Hry26jyM=8*X4uQD?Ze|Eki*|Sw&_EZ^S80Q zMk{4fY3Y?in}(vaqpeE2j(OQLq4t&s?)Mu7@@nbnv@bML&TZcymTU{Q 
z9rt(sbH|Rk-k^Dp1`uoJ?pWAMw$9$KX_*9natIRe<;_PlQ?&lhOFN3Dwe&Xy#{zoM z+!l5U(+XI_*rUNI+Loj#j1+5h>%_Mj+QP%wHFcv=>I21FprLQ+z8y_(;3VqxdWio#7Zmw z=e>Lgc3&HLyS>QMIwzosBY6fs7)M$RvL(IbTdce}242k@8P6>6kvw@}LL<8{>`a)$ zJ{ImJ-zk^jM8G5GJHuxQEwQw@TP>lD>}GXNegK@q}Efcxj;JLK1mXzCyBQAU*vtY#hsTQ}kEJC2qM0^@!$+SXm)K zfj)0FO0ZW53R?Ep^PAz4$QyDBmr61wNC|NsCHuyAmJ6K)&pluX!S#OOJokX*LZvIc zK*1kt%0HM_l`J&5h8!CD;jc%4VGFp8`fZvYMBI&vzwi_vJu^#v3h=}G?P%ddHLDF~ z!=0ayoqpPudMNZp3%g9OeM9P}r(%Zvik##sz}sFM1$m6hTF**2%IErU#Ta$jujD-g zxZ0GC)RzNE2RZ~Yp!8UJqE?H#acJ|LcMdg;ebRFJvU5rxY#dJ>H;f(CNM4@2QOBA(Y9{rs=gBiD!+_CddPB zyNn5urNLrB;t3I(nCm60G&Nc$>t^iPo7^ z(I>8BBa8m&dAE6^>@_3h!wmU{Tgl2G8~%KT;c6J3lQHElM|rUHuJznnX&I>qE^8KK zWoYdh!G!!W(i|)SB^0tYxL;d6QvlW)t-#hW0!hFbI9)5Kz2q!#<2Se*B?6kB=77v3 z!e|o)Mc&d%JZX9_Ni?nz>H*VB4v3I=v?UOv(3n6Jo-zh6AVO0Fy(rz>Ap|Jw`e3{E zm6<8P?$#;-8SI*l%N?s{?w|Wa$Fb3C0-l*q7^Rp8yo|@XJcNE9&E+T6VAmd8YBLayRdpuF`C%uWSnWkk!nka^BS+~)|?vC}gTI_9sRAkWJF5`Hz8GVta zr>zT+HNkqMtcYCg)97hp{uwz-%aj>u9WB})AlbnmkzC0@;I$EfgRb@1v~ zm}7m1)4oxL9keBbCl|z;-fJ~Yz`RD_Nq11BLmZ()k*g3=V-N(=h;lqph^1!bNAvS< z7#p7OkkbURC8Jn?49_rk={y2S7R#GRW2}ZC>PYDPpe?3*3!Texf`IYQ?x>#W)u_eZ z56HA4#eKwnYQwy{8~sG*j6jxR&U65Sfbpj^XB)3lOZ6`lwm{0VB%V1z8|NiWzv@ka zyqgj2O_SK0QsLB1z2#J=ikRCo0F{i0K}visk^j58La^|jwfA&m7g?oS zq1bCbs@n)myof2v`@6yClR)QBodLDUNKcqFCWG|4|+sT`gfM| zZo6r#<$NLM#cu#y!RO5*`JBVckjc3}cs1Nkg^Br|WuafJCmY){xk1i0E=`fy>Ei0i zns%*_jF^+;T0xuJbpMQ2gz_rv2{#_)qjhS&L~4`GdA z$B}TI)Km8W@DH?{KRC{EUgD4T?A1Lbg?xMqI9CSS{ban3uN6LKLeJQ^E zKHgF7kT^Ek7wE=dcYaip%syWJR}IDXxIL_E{rK?qtj?2fjjuC#hE3n*nLoXC@S)U` zM`lecD^1_^b?1UzUk-ku{MEN6P1AFox(!^H?uQ8rQ=2x-sN)s)+oz&E@n}~G9823( zSzXCj?)RtoxBJWd-(wGq=f*3NwNGWgWegBKcS1j3!;?kaS$z@ayDUQ6x+iIIEH*V8 z=^AyXEK-s5arQ=x_SalvqM%r0ck0r(fUY)4-8ojhVOql|szc?I>CF3_<55Bu`E`4x z*rOE<0(pY-%oNC1nq*F4c53KzZZzNN_{34^`^hoJ_r5RP*W@U(#+gIb zS!S&@$y{%(Feh7!&Ba3$~aL;$W z+I4T&Ke~S3bz9dhUGbH5l~XDY`-l18@GtiN6Z3C(L;>Cw$C(rY(QykDkK1$1N6a(L 
z8_YZH3FbmBY|gNko2A@#bE;WE?wq*ap&-9ZEOJzvM}re&x!(eeYV4%aKgG#KP?a|N@Su$7W_f*4Y3-e^bmG!fiPIRih8<&rS(PGeY2<9crDhZugtsy z>onjS?h28|R5$8=t0Tr3jFcGGMI|*NJy9GAt(FatQfS;LjR&*`v!%dOD8{3FUgEp6 zN5*}o+cPT1bEF0!OT&ec| z%Rfdl)mR^2PueuL_(8@JyRN|*ZzDfz0`Yr{NedY+UGjR^pc!zG`ex1j2ThXN@J_pp zjE3|oKU9{V#%f8myUA6Cu!++( zj3+{$GLG4hfN7z>@-sT99HcsWwJXj6t1Tr#9yD%E6&7y4^ z-CmESnNFH;m$q+&a7)^f=EE#b9T^q%x5$=I81-Z!%9w()hMEuyLCcmdo2IZEq?;TL1n?yn4xk@@-I0QvVD$yNb9s4xm-x$(}P6IR7S zS?TGN*TdCj-W-BV7NuP-(k`gSx^7{UCuRCgBs)J^Gk}`H7Pq}nlpyZxVHTo4M7r{< zyxC;kZY69P&PmZ$^JJPPNo;peGaCMl5MKvG+WKzDBh6dd?K|!DXeEVMU9d6YB|HB3 ze9vakhqEBYozh344|a`p(Q}EH?98WB+_BYi{;x4A1oTLLxYg<xFC#RjRX3fkM|~%!)g^Gd zkW$Z$!7rtcjzf|8Qm~8|^5+ei$!&RnVn~ukF%*@4m3r=BXfn0PsIkFHLl>fT*;~5!tJ@|5B$!>tb|(#2em~UoKs(JOyynx$LJ%8uOyzd!k|K-jFElJ1 z2L1(}LAgUckzeBz_2j{P3VX8A1=#O2L^;UfXmSkLuRXNq&Q0;S`oL67_y?6ZNwkLToG?0pX4_T(GIkn z-)V1@(sev{XkNkP%^!sI#w+E0xyu`De0WRx8{3er)Gs}*rI z;;d)LJXL2USGlqBUjH$FuK#WSqmhY`H9om7fh`}$apfC z_WQ~8Ss}7aOriT`h-`D$L}oP`;9I^lkYMSo0alQ8E)c| z*ikNq_%oU`9Y?Z__lKv~DR2tn&6LvAA!+PLHbgY;)FJyIU&%NbV%yz-3eL9;t{g4| zNAEbdM-sR~v0m#CTFd9&f&OKDnKHA1boRa3Nt@z+F5T7`BP zQ|m2SvR{nL#5+()c8IX(O@=i`+I*9JE}zQR?O5j6<@g-(mDlX->?Mf?=AxRsB=5r| zu?Aq9rc#rmaxW=^e+WJg|AgE`o@}or2jm$ju~yYf{_&zfhH(8nIk-8jCpk!YWj0ev z@1^^=>Y=;uB?}DE@L<)C&p&k$+VlV9&-*Z(?HK-Gm7}mU2Y2wdu>^JZs<7@@hWtQp z7Yeh@RgMWy(OjDMqe_^>EV1P?@POY+&Ox`D23m}|jY=fJzmy81zTjWbkr(&Rr2=mm*$n|0Z_?;Xp|>ESlO z&2(&9o~n`HCdh7B8r&0Kz$J^}Ld{NlCi~dXlGXE9LW9FG#q-URFD>PYR?5D!{8X}< zzc>fbX#cubQ>x~Td+&5DT3aw&i0?Ii;ji|kz!syQ>wBv})|W(M1)LtS!dNVY+kn^! zF*b&s$IV166C8hAotmUf4q@%2`qM;fj9by3Hn$cj(RdNKO^3VwT09N$fLl#W2I)N! 
zPC$DRR?^i+%Ujs_$Um%7)CI`GooY76_%=y0$gzrOz8dElHQ9zatSO1o&y%y7+oZkz%-7o3%9$tUt!vQJ({6z#C#%A)nge`GmN*MOWWBfp>(2gJ2X z@A+{X$$=YRTBm^Q!mXS6Y|lg7PW8{4H1!@$)Di`=Pi|5qfeYNu@?`NDr*Rf3mHVc@ zHn>)Loz|5r01i>BN++9l)Er0B)(A6(Zt z0%cQ*c_&l<@zJkGeOJD_PwqI&`M%rVEPw1t%lQ^|%+$$$RO7}_%X!UmF4Y_`7dhI{ zPgm*9bmf5J7fQv+WZ;BVVc zq21?hsGL#RQJLf4A9+Zge@(6+n!-ZkfVh0QD}?GDO3 z&^d#`=o}gVYIGSbE`jh|j(%y@d!g(BZ-(Ct`3Bq`zCC2K)^@GGS2AF0cx%WKa-FQ6 zT4q#}mnT=9QwL72;?uCf?*hStQ|=MWH+3NW@KSRmG6b5 zV<=tqH_(+Cql*>kswE#!mYl04gG9QpvXnCR>bj$Jz3zqnx#9-wP(< z4bsyTkCn`mm*7spZG<}t_jUwpFvycygbzjPNk_0A=UN6jY1}OawWJYwmm&cozy(lt zP|$?i887n6ATI|kWi==OtD!lbu5_2g*K2CaTG*S7&JIj2sKl{0Eli_3{o-stMc3XsK-w>LPo(-bCKW+ReF~0)g%Wb8!Xv2seu36$^mb1wGDqV0M9@?qBp?C{KiT_y%bDo1l4# z^?wCT4``o=&}QwuKF0sP30fIx{a-<&v`rVGRo?)uV(;~q9t@xEbgIk9msx7EZ=IU( z%2du2rzV2(0@*eB0(lJX;$$_EOnxtPQH1p14UkU6AUV-zlQ~zz4--~H((6h#N&kY_ zP;LVBEYR9ZO(b#Wd!Zl2`txq6|8=ZB+bL;KiuDIcmvU?a9fJUQ6x2U2*6DLYoi}22 zXgNAN0_6Thf4S#ixPNB>X2|KVf@A+b&y-YgrkuKIrUb}y?eB%|ew@yfr*EKO$+iAt z8y>tp}kwf0JIqD|M^NHrb%hMRG0#2M+~Ut!tJJ?s1}rT`gq z=3sbeCtXiiY2g2_$oJ|~v6{BaDQ#-fjAjCp>LYr=4;QSLd&}Mu2==@?J>&3!Mx9xB=VPYwf&oE(~-9NIxT;4b7zgZ)QU?neeM| z`t2AE0GE0Ot>Z*|y}w+a4Y$Ye(RoMvbSX*rFSB7OdF@wN{^!|XBAXWddNy1i{pf7C zK0AIp8!~@08x-iJ{UX(KZ=ky8m*{Mu``AwVq;XWLJSva+BSk6mz0l)gt=Dd-RT`_c z9qpWmJ&%G_S^lDjB^ zT811=;czFTWy1kjqd7(CkKRCEmna7*7C^iA8u;9+rN@RR#wW&~khW@gE>C(QCAICFE7eZ}Sp0azVWBo7EX3Qf|{A2Z}N*gDnzs<&mVm-JE9 zi+#;tzC=$qUZPRmcx!k|55wN9j~T*3TS{gu>Lu^YrtcMoayp#-L*#p${5>x+rPPkU zj|;jL#7hk(HZ>N1U*)#BKY{ZxPv*l< zhF{cvmRx{eRm$S5Sw_aIl1Z&R8Xm3MOp0US3WN{Ix7wpQ-Ksrg4Nh>6$osgaB%Fjb z(?<4pne#E*S59O~lfYkfE*3NGyLQ61UOapLPRmB`UjuL<}Z%z^LHnkTuEx(Q;;|f}EXPmPysz< zs!?Fik`>CP`||MCL}2l&EO;t_yKW)bee8!hpB}4_4#LY1idlw}EXDh#ink8HnSmjH za0`>-!wOh^N;W89Y51Dcq;MAlo?)lmDXYs8`irn9U5I=UvH8~fbiVI>WsdER6bDb3 zQg*|$J;_8V&(fVG|5}@&D>NsQI(e!tKqRFlEytmmyj2py&F#v~`^*yb$fKJ}!UGRW z5Z?fsrT*b6^Mb`Z>9d6=2Q`Bb(rM^P(q1c&mo<#Xgz&&wGRGVuk!y86e{zVJ&M({2$(QR6Zcy? 
zVF_0T7>SDKB!3D^{H1{5DcBt59M>dmh z<=l)_#I%-g;E633@ABLU{xK=bggZtaa=$B*1r$l1SicEi?DGMph_8wAj~j&mhePxE!Q)yX`oxK>Cu zbY1R$z;d3h$krLO4MWiayqGxR%uf%}T&ag@a=p+EU0t=Au1{t-#8M*<&CBmNvLK(r zYSYltt;IV#PaS7GD%3cBt;Q!a%Ej_;qx`RL>ia$`h4tXj&LM1-Mh*x2PUuheM6TK^ zrc&!0a_id0&r`rJD~&&J?zpIBC_`S(T`bMRYalxF3(VE#6J(-%4w(RcG{as0xw||7 z9RuE#WL$ms;qDV6V*#ZvyQ#cFx{#}glN^4SI{^Ky6n%Ot?gA*JisbFMlcMheTQh2q zgSTMt<&j=rC2r=&_>MbfJ6>=|+4n+}11VrdJ*J}Z7VQ8MP9AW3n1rhTCj2cOC($Tp z+60bz?8WI9YgR5=wsSf9cP-R$4kp~~!g!17aa=k1WH zWFCu&i*Q;v>+!VTt+Tv=Ew7-9{bbATuJR74NlVV2LUqX8svXDIAQ>{ zRd|n+k%t#jg%06bZ!_f6Tp7;8tf~z*g}=?cEmskdxN5_a!;jzwaH+A2lWYu-KLVnI zb8XyNo-U)WFoW<{WmyCuBPvWykH<` z5}gzJ0`P{Q6V{S!XO3EHq_ZpyZCt9_ljTIti1!OQ0emD$)AQ#532BycIcg{RFIr9@ z541Z84+$vQ&|WUYD@Y!Cc4@9;v+FsAu-NHMQpzzHLUt<1iXg+~kyiO2olBEPQVbih z52Yvv!1h-``WQFaDc8%e=aE84Qx!oPDllwT=aFi|kD3KZw+>dox_NcdhZCqx)e|c( zr;io z(wi@=JQ#M3vvl6q*@*LjN3(}iW48JKTnKA&K3Kf+bU1Eh|L{J|S@Lf=A511H`DpaU zD!enP&?S>+#G66v5%TEXz>psI~}ek;EC+KAM)HX30m zABMy)YDWK$2JhQ99kx{JW^xK+K*t(-<#v@hMcSy&ZRb8fv!^K=h(Yif&f$IINM|V3q8Ib#gbXl&NKM zN36_+U}jLjXh3>~tH3QhgL&+Pqx!HFNK1md8{x%4=)U0g1o6UDTmv1X6!=U*Z?G&F z3Tly88#KZGp@Xb~zb5D<#n6T2C}26D>VTCq4Oha#^2jTDVZ-Qu&585)81&jCXG}{fNDy55=nb-&+fHRO%3sbZZDeF{<|{d3AdM|Z-VGWQWSEi%em+?M9j~J_EXG0S!LuP)G|_$bEP2FWQc(F%3!7e= zM{i!~zKwlG8OPpVel%W7p3(H-rX*|0o5lsn+l;8!jTe4og~(^WIHN_c(;5;^JERyC z@3h12%0W;3NgTBzrrf2ZJOI8uGGEH=;^^+X*FMhP>@02wJ=Ya}rO%`XEa#6`w}q?V zH?iMmF8$C7>d%xvCfd|PE=I4>5q(7!B1c7DzgU?8>G_!ke}MA;^zlN+Nrft~hIXc*~^OLdzkNIx-7)2VKgj-!pE^WZ3CnM^>i=*q}BBkk8r zHZSb9AJUhRe{1xmPi!b7f7LAKt|0#(#!EBH$ZgY(_7TXZr7IhcDt_u*tv}rJCvb@; z?Uy$k?%6|XYG?gEL0yH4#si3Hsrs+!&n}yt$0<8j5iPZo6-d&zpKVZZG*44Cs$_u0SY9v4>Pa5}=ZEBWBy%JE zozI)4>kytKhBr!wcJ9@wO3&|V{n#yS_Kf&E&NU#c4j*pWKX){FdLkc04@}Hyp!~{2 zx-~p*wPd(gI!@x`QoPvk!_ExvZ1PM*fEWH^3b}ZAED0hR!1NHw|W%p>d$)$A4u}Tg2eine539cI~|2?GETl)hg+lB zm&f5uQF?jgSd4OF{L1Po&0hN+@|3(W0cSq>W=H}nBGFwcU6-#>kPi(_{qd`O2vY0+ z-#aAsf2@1#gIKe345y)Qpf)$zXJ|}dp_>pxJX$_Sc`8VIL{RgjC7+HS>@kr$cir*e 
z?V@DK1gU(9H6Nie15PWLp9-`9Qo`wbMpcM5wF;7tb^9Uqf0Ocs%Gs62E64cX_gDIR z{e2@iL4XsFT-1!S#m#q@)QZE|zumq@&u{^9PV@8yx)am~ZP;O`%f_y7B1rA5c+UMpTlSc{l4ywl##c!zk&wDzTV=a)yOuZ2|Wp?VVw>m^Mk z8*%T5`^8=j)g7bfw`i?iHbw;gs*LCF8^JhFXV9;r77_-cj#>V9l5gBA!W z*trDk8#FD@-mMXaFl0oom&|LQWfWdP_+5IR=_P+`_hva!isn+;^QU%OET6tpr!Tv` z40Se?C(_!R|c5t4GdRk{9&pu|K=NA zN|RQ(U8JTP5XP!xo?2mwa7fSu>H^aO!9cwC8N3V-dxtPtZu@x5sjF`$X!^RT9t|IV z!f3)g&Lid65k*S%A&rSh#rAtiTDu9htCZSPpr#CZGujgjPF;V`CAvIv($F&S&JcX1 zz^*Nto|TSLT7!bk3~$#+MtR8*^pVzp*#Vm;FKKV5F{;VYDx9^$3q1p?8UKqK>h;IM4^waqAR#+k)AuE#+P=QKaIeT-Nqzl9&8WWz%eQ8QyZ+ zcW3?TwFsvbzY}v6`b#2o#v{Zp#kkCnQwpg@?><$TMuqlvyeLx(c#VkHB;xzG?GZYc zekuuJE_ul&cR=)&ur8u@R}|`BruJw$G^?PKQ{df35jPUhvf6oLyIsJUkM>~Hwwb=U zGhvU314lTfQHSYkh9d5)?mrV&Nb%JS`ABe#`;nFwOY_Fkw&7jH{8*YTmi7(G>BV;p z71C~KS&~qfAqQ18Vq2v?S?T`a?P2Ijv)26i_v0+*&mB7xv!dob zw?AMxf90N^!o{6g2&Hl70PS!0d==hN+KH0$xW$Nn0!i%`dk7(|sPDP@Mx#11wli<$ zcRgsqL~5UjTS_^2-gz5*YB7iJ>wqeTz513#CcZr!rrTuyHYWqU_7C|WPd@etCt1Aq za8tb}H(?$7=jp$k{j$xn)e;(VB#oW^hL)B2Z+@>rdov29{eOUBO|Yzg@%Wjrz?wk= zJ99l?T(#`}wT_#6#53{x@&wID zwwN@4mzBw(o0LD3u=)YZr4O*5H?e2`tpB47V-87NcQ9OxHP0JsQ9qg0a-Zqbu-JJ zidK^v)`qXLq|V*;OtWU*Gi^q}k%N7qFKl6Wd?#kx3p?#5ydqU_1Ep7Uc=DR+*IxmF z_KI;b#mw+n8?JxfB$M8&kOjG;le4+ZhErQDKV`#)L_T$)(aU`@6Wobh)coVZV(2}t zp8EvIYy!@G0yz#B2RAyn_P*uuu&Ba~%u~kUqbTqFj%(2~9<@s#KWgwMz?i`>9{RM}&b>&hcFon8>Sdm@*n1w!U7(&04*Wj+0^^>}kx2U~ zw?h4Q@Iwu|0I|`zCUu;#$kQ8);`TGxuTFRhBKpS1ZU8*mzwCRQEHnLm>=#Irrp?d#yRDMJhgvZ@)wx1Y%4LR! z4mL@5vM+B)(WD=Fx~K8;vY@N>dj9X(B^xrsuMOUzd6hNJ{Ius@)Spnh5A~~1U&8IT z;=W9JA8g->VZleO?&nzr@^$;s`xveutqWsr%gSMI{sPs_HmoC9oq4@EWZq%;gszWdLE77xGTQeu! 
z&R%*!!hKm3V11ne`8+GdwX1SjWp|~@@A6OfxA`-!y{Ej-I9a>b?k;WioT?c*YIxl` zZRcCR?-{~=6Lu93`3`4SZIf#qGg&(bJh`B*>4nM6#{LVluJ2K@A9zg#2i;Krsrs37 zpYGXbErRUbmQ$bH-QM4QIK83GJHjkVcpBz%hOiUUs;6yrn6ULH}l5m zx2Tv>SmHi{SAUO$NFNVzl?C+|WssT2VKo2N-t=Qr&$e^hGO1stnU@*TD6?dqoQuto z+g8dvib&s;4EgBYbl|l{j{9c&Uhoa@bvYc4*BoX@PWu;0abxQx-!GD|P6tlO=E<-8 z(AiKr2|V#hNM)>x>XV0^c?ow2G>msg?p{rg<6#H2_59qYu-eS)eF=_YVv~o=^WdM5 zd(A1nVzO9Hbsoi}LQZcDSywS}$p^A!7EUbg#F^1En~Yg& zMv0d<+|~1^^&{E-VeY;=Vc%cv)3emt8}GZ_amFZIjiHGhqz4pb|EwfHyMQm(0GpEL z4${Zxr_Su#^JanoF2%YAqo&o}mPPYgBGLMFDD|@OFt9r&&IWb{*e9%YkVR|j0gX~# z7sF7ebQzxlMSqS_bkBxeS%Qz=p<2+VYIl+dcQ%;@h@Hk?I+bXF0}v&pvkAA>Ko(wu zI4uTon)0CWJ*?DcVi4_sSmg*<>1j$JAray8~LYw2^KMemF_|uIhX!NtK zuI%j^g%2q|p1s%pW6puPn(XwtdQbMC*(A6& zwJpw7NwQvC{FGF?4C}1V=_3BiwojNJK0XsZ$jg}F&s5@$8YPs_L!4(SukS@q_e8&w zp(Ha5o9tA(@l^IgO&QJ#3$T`UlbP}rid$JFIbvAWCr(=o*gH;7;`7r#WBxPYr*M$h zGNyPXvEjU=Bpu4Dva&0?M^lV5!XQ?moyq;Ua2kILcT{nJNeA4o!k_RaMiv*XyWsyzdynruz)+UwlK@Nj?qh_9;oXypR*fW))@~Zq#sNjd?HyvC+8G7s%TP zjTb|S2qlW411dUdD<`Ah;o{+*ftxzHl6(d?4DNN&@+AkWVPA%Z)x$rTA;CHifNJGqnR~ zHmmGQ%>wxeYb9sxa7TaK(gncR+DJC+T~h}*tt4D|e;a<=NWzN?tc6>`xU-vnf4Z1r zd-0T&-?9ZM39dHux5v8Q_|AYtF|WZ;VEkslv*91_FGgGmeF7vS*oOp+mH~Gor4nnt zU^uiAWa}fl{NFX5_di+D@cfel{t$Loei!QNf;OvkT_@Gsbl@8fPOH*-$B_GnU{$GR zSxuzZxYYlJ#{_-*0&B)R-_&x<(}k1MmdqwUD0!@=3iP2vJ{j|rhs7d8jO|*o%2-J< zMNN{He1Tc{nE~%N!Ld=t0Q-Qr+2k;0PAywWZdXz{c4#w<+zKRK z!`e^8>JhOf0xKuSs*kmmZx5058?cuD3ajV;0c$N}4BTr5H(tYYhls~7;u#G*R=N2a zp5-NFOL3Q^_ zeM;+V*=2au##)oK9n2u640hO!R>=m^lVttNgkA6(t<_>}tzu0ALvGJqExSb{;ESmz z)B>r&TDXGHn@B0jJ*xfy{qvgqZcscp^$FnJk>SM}>I z*9)Bs5*H{0@;$!jQIYhbSuiR(A+NDifBzUPL7d3LmtT{Gq$D%?H=?qu)bHlguQ)D_ z_11suX{ryR{GR%akhY%f;mJ=s1F#(7`(Yj`Z|0%Z=C^}wRtF^9ufRJPvbrRLon_@U z$E}T7+p_D)lq`qUB<^OX7zI;L=0xcFZX@;LifbbslObJ`+%q;3ibUu6zwpI08(ErP z0r_)*)no0n-h!5|&#D#$~h7mrDJH9);~!mAQl~GPi+$g~(vM{82>q+E3`7h<8t0lqKW7&?UX` zIO2btsyFH;$+$pO9dvt-c&ZP@z47}$(wvs~d-De(=TgB3;YS~mao_4P=C({d{Kp4k zX+?8C9QME;`whjnT19L;iQ>CMzBxk6{0NBB&pNhTFL8@V&m=icl@SVc4divaRb>t^ 
za=xkmI`La#^nK+pt$S{Y?VL6{(^+?U!>2vRMttCU({_3DS;p0)UsXVJ73=bku!>7HN1X}<{$#aLCD2EIgp z!;$U2c@D-`+q`#LmtqSf$h-T^?&B+-xo{#Mr)`6=fL3h7@@Xs2J!_jkm)LlsrCQT& z!qYz5?w?orJz$kZte5vr8*xtcaG>An3B#Ry|9J4jAE#-VH*GWL{sFB@wH^A%(mj8g zj#{X7r}ASVaWZJXz-1$(vS))Xd~h`~=7}Sk5$BRoZ~oldflbd{dce|sJ4Rh)*D`~& zvdCi`q@L2eIp5EH1=jyp?0IuK$I-9fy@K!84Z>K-z~}G#e=S2{tV!O7@9MPTbl%Dx z3>O%su(XO=@5@E&eI>B+T6!+MdMc&kTj<;88{zqImd0c3>|2_#x>wt%A8yml6rM5{ z?t;(5Pg_}OYt)~QrRHQuQ~M1~TUk2i{p?2la-`7mKgM#>R(@DK8gO=Nqv^}wN8z;G z_v}ODq}MNfHuSu{bi_p&9A2M3+x@(N?^ZyX@{~7bm#r^L1IE&sr^paTy?Jdw^av4ury6JaHXBn7VsDJ#X^y9Jg(Kn@! zj-~HGx`cG!BBzi}-3W0{ti-DST4GhKMDKqs(HkofcN6xwSo)fq($~b&$KRAbK9+s} z>9myxV(~$-_@G$)CB*;d*1QxevEU}i3u5V~VvtV7;)yqv!>eu)^TwOfH^$QMz9}6R zf@1ljH>DqqrH@29rF5tL=6)X;EAci;(6+uEi?4{)Nchc2t%#Mla1*Wzv2^uK>FQW| z?SF+>8!IsdB`6J3#Q5&W^2+&m<(>jvTcO!&?ZE#TFvFZ;Z7|Qb>dpHrzjt(4XILAo zdh7R@(SzO*kWdOgFZ2z7nC1jRCS3#W-HslM_cC6fuP*!K3cOE?P z(4p_QRG%26vef4fZJJhp5#OuA@7fbv4oTkGu_Z~BKQw)s_^mnXJu|$>*}3e@@}0|V zL3O8eipi)|4M#obc)zIHlGKHlz>$62rAf;!$Rafles|&NE=p+)+QO8MR%NrZRHNA> z)kwBNRTZgvP{5az-yZSRiS2WbT%aZN+wZElMc{@uxz--qg&sMVQT2C*D~gO2TlUMG z*16{|jNJZp7kDz-Jl3_TV#msz%W9VFT#hg8f-Z_nIONsTEZO}4?pa)+h)_p!qvfCO z!Z+z7Cdjy*ZXUXQ6ZxI!Gpv*owHY|a^W+7jeTBSHZW*MnZqBM*Kf38T!Y2eYW zVXeG3QUR zT>(gQfu8+yrSC`vn}U*2d7S}i^o!<_i$lA07o9yD=Cb&zx%0={x)?v*v*_<_U4rN{ zczuF2rf=!LA}f5U?7O~EzQ6kJc6c299Iu*XTpx3?Cf5yJ@CqI=a z1omLs_N;tX>5-Gvi2}L5^c8575=*~B|Bm2b{NqPDDrqGaR$VeehQnz+*c@gYIX5XcE)2aGfX zP>R7KB7#PXBe-r5t9V0eTa5~oVzrctbt%;WtR=*1R2pwxjhD4FSWBs`HnlchwyRjdhu?!y) z;j@QCAnt)a51?)so5l_!?ej5z=&ds%<|$(mT{Iv1a)_yOs0?F3#OuuuNL|VQ88pO( zCf*pwuk6?rhyg1k64BA=!-eZ84NJ=o*%32nSXw-K9ZZ#Mse{ISCFU_*@AlO?M(Ke3 z`EOw!V8i+#=Y?DwIH9-Y206Gzxe*+3y}I#v*rtkZXRG?4Jv2~>=^i&o#uXSQ!F&b@(MP&Y^~l@~&2O1nYEs{*l$nZeF) z2l-iUz$!<1Q}-MfcB9-{sm+_WOxu+EC;PUBzhYLG$A07h4&!F*k!Hx@s)snaaR^oq z12G0ZP)n~XYKLN91V%qqjgbOmvalR|&yclg;h@E?s%$QYMusiowWpbe5rG{B0@+aO za1OL#FJ5hwwlr8cj_{eEL_%ox+%vv1Tg zkoR@;F6Q;I#P9Z`MS)xIdL=u39`;DpE@Wo9ca 
z!S978x;FI*;1=WyEd2P*PU6W7&}p5(ldM_?Ig;TZ#}yGq{V}XVaBpUz^pAfu%jt<> zn}T5FT8{Om;}}WD*MbwMc>mFgaN|u<#BGvUtxC&C#C(%P zg0ir7x}%AXgoFENA8v?2&4#TiCbo6ZhPCY{P=^Kfy|PGtLdyD>L#;FwbjH6K_V1NF zz>9QCYlf85Ea{U{P|1q!W3tK4 z2?K4%?NU~5#QGQc0a}Oz@>$wIE*#%`W8)7dT!_Ih_p}Xd94vS|_OcbnxWln zLSsaa0ry$r)0g6P1g?pw;|FoSiM7Uo_WtY|`w-Nnc)53>mvOf}5hpqxE~G1a$K^w? zG8kq&*hF=-jF8_C87Oj=*76+Bn-mLu)7ImSS$GC}MuAA5wJm zmes~s+>)NFrh8puwdDEv!xV*0-;F*;UQ$=|dn#aJXyVoHMlXmthn?a( zZrkjLk$UDqsE!vM_F2S0I+bK?{l@t^$~C`8d%G*~`a}u;>JKcN!B)Ryt6LT zy^BbNwSCC{#b}K7X(G}#z-6@P58NH?OY_FN_X|-CI+C>w8>z%8p0uQyM7Iqa-Fwlt zX7xuK;>GpPiN^ho++7Kaa*Q34Q7ByxhguyB`7Gh7(A|z-29Mn~E>LiGZ0PI%$99JN zlF$s>pHS1U$VM75kDPWCjVAW01(vMwYpTi`gxJ{TZi?J}Aes^eyK|(FUluVuYE9s9 z{hDWH$kvn&3>(Ol@ElVD`KQei_Ca#=_9#T{=k zfX7v^b$*5?)HC6k&HQeX{#POhc+RN4~cR-HIURx$M~43gQQ~W zTEMJ8x5Q5Bf1PR*{BKcL`qNV*GABKKxijWfCEeXpnjvRb8iQ8{=#RWLbeu`1H1;{H zxxGUxiK9NF5LaF6{RnBjDie~|U-kb%F;=8xG$f%ih)si8-eT-As#I#cyW^ICqoMaK zu3BLyNpy+c{f)az4y5W51>Vw74NCY#ERu=5!poB9WDr-nZ?zNGa)vWI1K~C!S~Afb zQ(-h8Q^uIGy1yY@s0@mpVMxW=*KHGyP#F>xEe)o15OZBb7ERw(Dh1?QX-~<{7zOx1 zzO%te>3%V~WEys~8u_!z7+I5)?T)XTdb(@qS`YW%M@Qh~%WvJwl-ZaM6_imj_m_#1 ziF&QBX@aw{>PEjN)q5mEE|^uTl{{HmTPo7;wkd({I+c$;n+#Je2^@@$Mo(&ObLmM^+S7S8+Wj4<51JDW^^P%#f9IXM z>9@2oxtq|+ovW&*&`ht%!q_c|d8~~kF z^y`{cvg(paS|er*$+yuXGpNnjpaTix#)6Ts)ii1rPnu;sk@g$js!T$u% zPTdxRA}WK3hm5V)5AKA%u(5@A%OawqD!0#@rfecpF(e;;Wm_n%eX6oaJ~XNDY7=(8 zNl>zUeJSnkFR2yF-;=R9{wg&?MuKum=T$N0Q=lq$&OeW+-mue}!JTb}LF-b~+S@_a*dOmNjlXnUj+codZP5q5B=ev;4Q=IE%^lJ#hJ{ul7(khJ?kGZM;RsPW&4MtJ2X|&Fr zs9Ha%IG3ITq>|F8+X-_22I}(o;RJ2~YE$__U*uSXlr(H>OCim{I zboXvCvHruiRB2M8?k$8bk81 z|Ak*8&#Dml!7B^DwfROU1??^+uU(*YXn#;@v~)-N zwrE}<=y=pZ?Lv1M?2CFUD>r3zYlNd^z#CM{R7ypil1HEYpi_xbGCz6~cpo@9`Ki`j zo)=YhmuG)ZSXOl@_34Wf0wTYZiKV&(eFb-Tu}WUHABaan39< z-V=clP!NmNxgI&zhl|0ulhP2xQEh#YH061V7TU#-kc41AUj@z23cuGCVY+FseH7T_ z7q+KHq4eVwWlh1(!9sdn_cz$ZnCq0VG54@h-Zn2_Oj$oLQudDSJ+WPe1WI|9prv&9 za$_Sk7Rr}KLBc=;;xYleHW_)G=ahS>Esmrv&dTq6dqI0gx<2KPG^{dZWk1_v$?CGE 
zSSPLT^~h7Ritd8r$;Wbc-NMcejL;x=Vxbf|8Yv$XNI2lIK|&xvwE<9Hlt;I#wBW_h zs5S*A;~Q>5{reK%tqmb!9Wt7liuX_rfj=#zCp73@yP45+_ine(c3*^NN4S6kh`5WN z>n`8n=i4s3TdzC(e5r?@-;trf;gkw(cglUsaGl_*$Mux&30$?llJsm+iP)~E(bk_> zPB=AnIPsWftuqcE3@O6?!~Jj)inah4U!~>L34T_^r4tpK2kk3sdJJW9eR&+dLj-n{ z@+27Ff!0y_g?a7WtuWT5K`FR*ERXXRl#`8LILqlktw^qNpznb8e4#@ZE}mjJcG(xP zEq1r2$6Q#Aq$6D?w1~;EtN3VZ ziZ&&(6sNBQG9>z#$m`?SZkw&q$vQbB_TjMFQHEV1-90$;VV6XRhwf9GWTaJXBy79^v%Yg%KnoZCSzu7%;iI5^53$F4M?-SR{Ri9(eRXqeEvL zeM()taUSTAa-*BeX4dr&Z3uNa$XsZsd=fJrMcCu>G*UgFr4M%OPE5Lwm1P{+qsypl z3WaP#I_hPJJldgSN8eP89bK1%Q8#7;3^?H_<%V)qsGC1Vhr-oJ(fnGp#A5zr?CA8E zJG;>$ShtJ|iKVVpia8m#FTN_O=|n5$T`=dukTnT{%su~du+=@w>ej(|N8={L27k|g zf55e(&3?_^e!PWV>0CS=ThI24t-n$EefZo(8L+eEg@T$%?<`ukoDntg(fP`d4-Z_V zPm}ATTH9Mv7B>e4x6&|Iw57~E;L0W!XV1Nkw%myoKAb*ge)H#zaD|o`1$D%4&Egv9 zol}08ZkqJ;hl#ESFzfq)@}rKA+iW^Y!3zaZC-iVbGl=pX@#t3@VYx`Plw*7Ju+-8|DHv68q6eRQt0`_zq{6BH~C@zx})Zg>XPet(z)%4>)vsKky8EV4--4m zI}F?Cv&X#eW!PQ2x_!)ni;$U*Jo&xfzw+-L7X-hreE6fQXGrK7^a#m2j>bkQNbeAg zcgOD?w;g^rB8(4K^bBKVj}W@!Pw$}N{GR`r9{+cT@kHSZ`6-hm8MB;NPyGQxqjfbc zHJzCGn*BkJy_0e*hOKFN$<^B>bH=s2RNvc`obf`zdl;X;ST9{?*r%#65AHrku$zoEjaPz5{&PzNbGRk8}jW8^Sv0?5_DanO|WCY zvWL7me(8!t7p>3q-d*pd8E)S4S%$eLZwkKR>hmncTSz#WM@gvNSCHKuih3a5V=Va3y~)AU(s*ylL^8 zT#|9YZqrddO?{Vq@8*Y;7t$P~Ed)JdyrJ-m=4S!V{sp@gI=kx&d$;Z(14P}eLpdtF zY#MqxM{l`h%5Q42LS{UbD=%pKvvKoidwhcwf};y$d9=H~awcvfe6(IJ-8ho?az|PO z8EZV*ykX;!mbn<+z1V@>?S5exqtTgTQ%-r1hQw!VSj4iW8$Us+)+U^jAtzM+tkPN2 z`B07(eHMGCwMA_#*%qIA!1WbEGW7qc;U`x}s=eWfNq$bchEbJuq=n96OQQuH)`4bI zIorK5pg%f0+OnyM$^?E)?T^t}e{rt+Y;-AF^4OT}Hd`R0qNAEBoV3@IC-$8ZUDS^w zMq*j!+=LaF`Qmp)FAEg8T1ta%cDA=*Kjq@e)GH4QtpeCd$YjtR1TeEY}xZW8_BmNe@<>(jjN#o8- zF2?^9YIUi`Ni+6`-Y{ft*s;bkafezmj?P6K>L<+@X>^}5xmt1z<-m`g(sNe!JH#fd zUkKQ>I5t??`jt9I_ONXFeUHx12~BJ)nw!}8M~v}fapK(I)ItInb{;idQT{4CrF=oy ztNcJ1tb9oLc<{4H;1S)sRY(I}rMph|e^c9eTz_zGDXHhuVEf|mM;aE)f66;T-7Kl- zg?mgy0jZ~RQjG0^jN|P`71$e)p>g`7m)87l(&&Pcz=9>c`se+i9;=gPoQ-2T4y9h( zVBKfRPsV(c!x=r#^sLL@y}!&o4=1r@las1|AWF|d#GFl{Re{)xaV@A1cX>r3_P{L2 
zxQX1%CS{%#NzBoTJ7efvZ8k}p9f(>xpSvPH*|zlADB%_qcUv|wswgGWjOM2H4q@|7MNVgN(1Jx( zS-i72D7~d_A7s^&L8>p*(v18LN?W1ga@L?uVPjZ}g!|GN&e>))_nOj$+1kEzM#)4& zoBd^z!&w^C*0SdCw;h$6=f`~6SsL^M!Zf#1ef=~lT+?XJ(YaabR)lx!?+N!A;i*W*rm&KuWvyfFOjf(rKgiGfLL zJ(NyRl@>7Ey=U5X+2(E=IDGdzq4N#8V$32sv=YqkMQ=g)vqKufv<{tn=4=Ec$75DJ zT7zArVe+<_y^QlGjYaHU!%ue5-?};GobtDJA?7EC*LPH=4qDwO>8JYoplqU7Rb@pK z%&4^LqBj_H0SF~EI$_^#4)PBIsBQ{($lIf_#@k~>gMM|oqA?g!+ObaVNpHO;z0s;~ z3T`%Ge5|r}mIT$bd{aQNVUZViw&3kWZ*u1YE-UJg7bQV&FM44h&5x(Cic-OZn--1%x1?0125RJUvKL2=|xCA1CTqybe~CD`@grt#A%fh0gHsV0QM$e+$9WRurb zG>zq|OSo!rm7*-A&AyHjSF4h&{UPXT(3u%;gdVuK7I`6Ado4)lMaSB5pkeNTi?oJ4 z*G$U?<{vE*+mSd2bj}4sg7&lE>xC+%b8Wp4#Izi#lr@`i$LY|ig3;P>V#CgTdE)$| zK#~@^wIQ8S&~*~QZ@)M;KYFVwi>_!4z@H&qcU-nEbu zLVf8`sv4qkmK1H&up@4NRTz8kx-703cJQqNX<8^CPF+)JE<+YZr?igBvXU>w8TrLq zhZ^Zvbkf%0n>6I-Dh)X$+EkOK{?tZ|oFkVR@>u_&#%!X3EGsG3M#2^blfhf~SL(K* zz3M;87)(CilKrKyS@UVKzIUT_ADt!Gfj)z~$G|{WK+!92Z=*EEZXIQ0NHJWy-3rE! z&n{Tp-Tv;lP})lSSQHGj(Z_oFVFDuvZLxWb49`(OVyl1?#SG(WgFg9t+|Xz0hEWU5 za@_8HPhatX1E6v6xVpjP?qGVK)G#Zqb#*D03wXoa0=(QV5E&!sw(4YXh=37f3}drl z(@j?w(}nlA0bQUM6yr}RFb+U-P!KS)yHZ1zCzwM+Pn-zFeGKp+u%fH0 z4e06`HY0$hqN~e6lXI(3B{1`WB|t1N=90iX3ZwutfK*@(kO6q%y&vx0fD8!D6`0;Y zAdm%w0po$Gz(imcFyWHGsNpXcC;~D84KM`uX~5&aAAl*qBp?9qrsCcg2nLh@$rYIX zfE4%>kOa&IAPtb24vfDf;M*~b#!Fx#agWe2j1Mr*z%Y3NrniBofSEuF5D&?#=-Y#mG;kj`kB96XQ1(`dZi3%Ld~dDP4#E^v*|vV_&^8?%Yb*xxu*?#@63D_32{|ef4X{ zkjIC0z5n2uXJ5QJ=Z{z=9n9o17nrf&fw!1b%%{v_;CWwx^K~#on8(3ix|sLT0G(l; zWnN^iGBdydKM{Ey%P7DHGel6{NBQ9<@G=LAK{I$>EX?P?xAMT(RN#a8c-zgtYGA(N z;b%oK*MQ!g;C60)Xh13i_^1>4Up1b}7%M{5AS7Ni|Cay#C*=OWz5Jss`0w}s_kV?d z8wC>)bFk*yoV?&1jqTJVbhMw?j1IMWV)V+s;0pSO-?`AS_~sffOTgy8AMP4cn=<(U z|Dj92+V|eRkuf_m!ZM-^ZqlRDt$edf$y)`pLy1AbxVj42Q+e4dX`fJcFm4&;i`gq_ z-%$HyH~;&OsDLyB=wC4E!METZc`)U`Yy)ST1Wq;vcmn-B+VAUP)XYRC8eFh1eB1!^ z%){W37r^-zGdGzv=<`|7!`saK9o&=ptz}Y}$;<;7)DLBrf=BKX`5^84Q9eldAMMxm z{EI>_&I+E@jDDOFcPsds861+D1vsRAy9#jOr$8y?jotl39dnS0W0u|Km-JnCzfsG4 z37>cO|NbK~zh}b#P3Hf8|9?})|7S$YYYfWt*gTm&Dkpf>h-Kd624wb~9>2tI;^3!3 
z9vfXGNe-XiXKGx&uWxzzgbAlkX>GRL+)I~Ksx7l;fBm&iXSWv?-t_TVwQBtMk3Ndr zvnMg~0{G0`W=bkx0(ky1c!h}YS0M0U^QEL8Vo+tZang4>)1Pw4h`B#L3|6EHg85e1~*M+#0i?< zt&7f8{tx~HxakQEp7o9zwAJ$&EoN){@T2c(`^*R$(M*kuaA|$M?NqX8?r}*K-&x@< z045hOoe5|5pf$H(n^`5=G@xg&9&Vg)OH20mh%p4e64-@fM6#3F z4_XF`zx}wL23El?1oyqbekK{_iK4Ih@D&0d$-rs!7~FsS4R_wD{vj|LSP25pYaH%( z0&abJ*wegCz%#9NJnvwrU;4*04)IPe!fn$(F}(DqHDNJ6ghF8Gog#RRz-af~d8T%w zVAuVg-iO58{oM22f6f2RoBx)9|8JCmW4GA`7H`DA=RAKax|?;}Z~gj@Uw%6O;h5#_ z`|?H4_PXU>$5@GZb#KQFCXjiE*9b!de+?_ppTCCwf|T?Zj8RuGcrg7r#NSYczKvyq z96^p?#s^{eia*Nh1dNgBuTK0K82Vc#{w9L6PGsT*`a90hUz+%nVZu&7hXJW;l2K-lp)T5~b59S>Xhel&HFz5{>U~WGEiz;6+y|5ba z8OC>uFv}%n7BJH=>y^eVWFBSy!TgQ+k@*61|7Vz=nDc1kc)=i;g=#@2LXT8LWpokNERA|IAMZtNze)71y1-`*e4_jV}$_Ws_>-no-j}tE_^IJE~E(4 zgf!t%;g7;hVY2X{kj^*r1NhZ^5&tQ#;v@Mr{Br&iz90V={$c)Cei*-%e~o{WPj`4b z_By6IZaQKd>l}Y_eC!BxyzdBg1ZzSxDvesxPjkPfzlO2yw{EfSus&@4hc(zxZurCy zVVLDw?%L>Ds#KcaG#jevGU9;UnT>rbrk~T=a^v?ex$d`m;%S|I{NTM$T?u*hv2P1+ zUK)Lby2tnVu1N2R?(Vy-f@jVcssFOlN1CpXU#j zv7SE|@$JMv7^QdqM2`Zlv>@}iA6_|YWlQ{Q{Uv?O%CeOO@mhVkK41U&e&x!)#DA*q zqu;3?zrV|SWPGx|Q$Je2W#!Y}C*xn$pVs^7eH#yWPl?~JpRRAzH|&4FyEuM;ex-hy zzNqmVZ*}~a`e6Ou#BXGHv<^`gVOnqq#9FGfux=KVSb*Bh&a^=2872{p`k{G6!XrH$Ip7NoJqM*D}Xu zb~cX6+?M%NV{N7)bARKs%nO+V8cQ=J`qg?dFZPHP+ag}YOIh9ViGJ5tpU!Arse#27 zeXKzUo<3mNmwn?$w8<)!UbphnXE}1lG1e&a9bY|6kM;TJ=BJT8EKXGOu1z;~pYz_I z+1S-pXuqWD@$&y@yfS$G{G%azOAblm+P|Gp)Q}HF6R%DUubHj=yfgW!Q@+_gOJmlo zvVFZ+(K_ewg@k?EoVr|Y`OlM*4vv3!NyYc-e-*(*l!NQQULYO_0V;rRflQzXI1ZEm zbes|nSb+wh4!8;A0Xm==*aFb?wp^eLI0bkEzQ94C2ABoV)kKEYnQ*V-u}8wBr76cE zu;TsUnC)i{JkfjWwTSy?d=;?x!tDIciMy9Ix20Phyg;&gFuX#gHZtj&e{Nq1M7YwW2+WWOH*1oy@{D&*LVwIZPfe17p zfaVQY6-);Nz-^;JlhHrO#O83__Yse>D%8JAr zQ{_=|a{a|t7uomC`727|+y9Fz$L2*F%GEBC@J-DPC28ni)7IF(##PsPHsial#AX+n z_|53_x*TaMxj~wHH;<6J$TgFTTu!5V;{s=1n@La0NPJw-D=cAMB+$ew7DKCylwC~r zI~leWrwMp@p8Po%!$~;0HwJrwEYO}}VKO8cUK!AC*m`1A7xw>3>5i7){1#(J`;eI8@~ z)L@3zynHp?!w8e=<35Kxc5}AVGtIC14NHnWQXu6erT1$5r#o&3c@(;0WY%GgYmYB?hn=P?~8L6yo$b7|fh01NtgL&o_Dv6rDapCEl=Zye!yvKZA 
zZid8Xx0^R%erF4$orpf3lS7;F%(j_j*UH)#bUa<3f)mrjaT<#!BiER+j&zvG&$FSy z3NgD#e|cI~@tO$iZ?hrg^R>{UinFCw+=DrvvOs^cmho3=OKHZ8L8CXAbQ{gbcZb?Mwp_s$v6>a!qe64W#q)3I7?o3az=%7)3r_Q zV_WET87Ys{FgW$bEF)T&q?S-YYFtKS*#sMsSnPwQw#$e%&|m zMiHws<HV$8~3L*JMYLo(-HlM~<7AaGnK&>QjtB1cEWKd*ZWtbXLTsd{H;8d{7NOYtP z=hELRi%nINfgF`B@QGGU?h{SxVCu!?pteob3UE6Csc>sO>!0PqE=9MERH^sdrq0u~ zsRPX6;#t0K%Dl>aQ>%55ix2L((bWC+Y~jrh6Fb-Ga%8)EImim`+mH)0x_uDiFqtHk z*Sa9P+MzAYt%iIxhP-VmZ}Kw68S}IOW}3DDfrq{*cdo}G_NfXf`VgC-T^lE!JLp}2 z$Tj+9V^eu@x|M#d)T=_jozRLx&9SNF&?tGgZjnt+2c)mkvl9;Zpyw;7 z%PB2UTu9)&7WWtbTe}6hHo1)E*>ke{mG5m&?4-H&9P+Pcy427S_dfEkG?&*_BPFS( z%>Rp_T{4*#dJLb3X=k-fWmn#&n==f|JZ&QRGtBq4{%+w(aI&El*%B{d{Fj# zbn?jaC1Pmqbjp?evvT~_tEG44t`%o++$G%#Eyy$`nlA!Er{)`^HhZ|auZ+snITu*3 zxIxk)Z6p0Qfb}P zXZ!IIW1*<8sR)#J>6cNtYcP4CKJnVOUZiF7bcnko7Hfa`jDEiWd4T!u+Sjv;x8iLX zs}V~mbbO<(<#gBcBwAXdlqW@b%F9S<$%2`4Ev@4uV z#Mv+DB`Wb#nxSi>cZ$G*h=LK_nkjD5wwT&3;6~+jyNDUOi^H*TOG1Jd?>OB;=OSV4&{q99p!TYH8`;Y$St3r z_`Z1q%$u5U;3C}@rxzrUbw*Ysww1$t3YY*;$!Kje+0KX#8}@#00ZL~cZ#k+z4Zp`* zB07fDdvy+}FPWS8_u8U&@zzwd2iLSBi^pX?39ux4TtoJEf){)w7LDc-%ZAi{i54pl z?q+qS<0_CaOHmY+{TgrF)MPk1tsNI&`}{ESaiH%Mt?X>;n`j%B@)|5jv^YbP9y!GRt}^XLaUx ztTBJzxy#bE{lLZG&IK7C!-YU~!Ee^TC5F3P)?2wu4EI*2HKT3rfs5}W+*X9Y?L~o{v=r#iD975~ z4^6zTO)b$LwT(o$&&f4~yX^Dq4R)n{L+hzly{+8F+OFC1a%*yj=7zI%wqESKTzcl> z!{`)9tk{~h8x*Xq-%!NiJ8UgtNa)fBVbj0JAJ+g*LKsU;M_k8!gVfV@#khzA(j1S( zxkNn-N}RNWM2dF}9oCMAo}}kzj^T`!AX93uJGWfh(w^P2q-9(SHGN`#^ma@&V%nZ9 z3sUMkk-w|#pZ+9ywqU#RMCVS+O~hH&3EgzKBk#CtW#^T=%}SyyNl~THh%4c&$%7$Fs-8SpnTj0c!d7qX z#gNCTe(*{Or%@j=E^Cl*gIMfqM@g`}vf=Eq80xnKI*q6l5NMUG@-c2{7f6vv*>uD* zu4SnwrpKJ0*taW9S=C6@6u5mTfe$%Tf3`><@{F@lQW?$Z1qH}o?gm`UF*}jNzivNp z@jUXJ+VEOh0*xbzwI&_cql zL;phSlhbFa);MsAE=OWZrV{S&3*I#2hqF|3RGodTho&?2$3P1UDNntXX+;YPYKz>I zOfsMLP;y3DTSO^2Iydn;&${V3HR}#N@21Tw+|uqKhs4zPELUc8Rh9`pht!9Or) zk)4LV*X25rtV&k*Z!$O?7%@C#;ZgQFECw=N73jBHQL-oeJk@+nr5wb=&*QaqgeiyQ zZju2_p|OhY8Y=`vETYoJa%8mDNEb!I7i6Ju?`OIF{i)VKW|WV$yJg7VX%pxPpuyau 
zO%h9gNUX#-AZ;0EAU-}+W+YFJz}HQmgH3PDrFt2!Cp6%$=cvX?p6sj5gUhd>?H+uY zJ0}?sns`+Xc^mZIh1z(|9o`U=0Xo_Lzy-6=Fe~?KN{9CNI>MJTWL20%zDt1-A5r4 ztHeg7#9-`-yufS*=`?BnDxPwZ$ss3)=7y%$1P}iW$fFQvpeH`| zpTM_5 zvdoFxC31|B@=C>&4Bj$#k2Ry>k=N(A!^lqPmU$u-wVP3ySoQLRQ51?$B=yw_PRDEGzu*OVQt9)^`dlR%# z8T;?I4{4aLixm4cTVVrQ#a>M%dCa-rw%gvFzq66QD={;(8ncn&!)HQ~SpRDbm$V z?Wdsjgb(v!;?+?sl?3HZ(ckWJ^J-nu9FoqNERe)~{i>)=VZTw&2M@E-Ac5_CdeTL9{Wwuiw#&ZuaO% zw~}#AD>P0SASnd>zg7cgIg}*NDN7AEPDk%kAcn}KNS^e$+^pV?KHGfV6s{gKVe=rz zX&3SUbC8gGn-qz@2lNJZQ_Vxia~bK{w%>MWv)l-2<2ssmvE-(zEI5{2gzQU{H`J~{ z2~R;AO+S?;b(n%7&n7g>AbBX^P+$cch_v;ir4qOSm#-x_WCOGs(h_4JPpS6XC-1qA z^Q{F5If0)mlq(XiX6v3DwzfT62N_ojtFnw-vwGdSlMU;(Ctj^&4Sueq9iSDuZ?s3W zV{^x$Jl~=9dCUklrqNIZ7FXZm2YEH_6ClZ26%E~oAMa_yH)>WvGA8PV86`}KzQ7e< zD&OwU3uDQ*3av4gY*J0-1|Th$RNlsFl8V$cB~%1fleg56lz#Ps?$Wfi?H53u@^T(& zQ};4zQh9Ea#1PPj4QU$ZS4|#Q$Wp6Gk@ItWwMa!(mKSEoenOhha-#zcWHP=T)ns4o z%!8Z(rPBpj16GfAdjqLHJ_;Y*3PEi$^!un|_7qT1-g8`&ml@ zm1I!NalGG}6%6z2m%Z)MfcHM%9SXUQ8FcRc*NyVtf08q zWXcjqeWX%y3h$G-ou03G6nZDG)34#Aes)fIhRX@K^aqWE>Z~@Cni$CEjy99BRsko} zPeDgFdhU?{V-l~LvXqjl`7BI#hh;`sWeCgcQ!?dZQ<+~fzI++9)X~slP$J7v8W9g@ z*r|`#@>(M&mMe?KW@|c{o1P)$zGHs}GR|Yku~nmR-W|Mh{C!3#dMr=N<1q%>Y12Tb zssKqLRJ-MU#y7HSEDxgg{wPl;16m(|zYRWRTx`Gu?jX_;Dai}CkIfg&gCuNI9KM(? 
zU?^lXJIEZ)-2y!h{q_QBk8A=>uD?o>-_N4*&kv>vihnen;K+9LjB|Kx86b5 zSe4I}^&QJSE%|7MKif5bG~Pa+^^@eQhT&rpuU>2j;}(KqthDsK*sw!1>o}qBd~O$P zFK68;naQ?Uck!7-oj}gi1{?D*Q~j5TpGKqf4Pa_-Vx@!Q+ zvJCvS>Q4FpvXT!#lwr{FF)MJ!@Rh9T$ z`RY+w-yweoHNA>_q&YEF%!#j}1V|wr#lEBK<^t0qV}HX1QnbfGlahT0WxTqzMn5Xc zA7Mq&eDRqmtC^CX#E>%yr)8rX9?kf5SfGL-PlLDmHB93ifdb?&SFiyOHYb&lY;Z`_ zeLbW-R!O)4;5=Vp9ce@4e$Z6aot1lDWvJ_bI%ty_4wiezx&1G(Iennc`i`m6}Gwo zG8z)F-q{cU9f;MWA%-0;kSWM%v_0T|M$9jxJ&N&baP-okb&W(1l(t^gSOxP(otOm; zu6+Zc<6Auxwau|AmV7lk71}VO(MvSPFl2Co5Euo0d!M}Wb%PNubUsn!v*lq!X-WsM zQDj`qI^%QMBUjM;d9Q@lpcL+E$s+?$R+)ZN7auaR;@xHP(mTXl`%jldYF`jQE}r5?>-&XBFUki z{=6_)-B;%|)USb3q*qMs@g1+@J5K*ihz9iPW{FhK5~=1%-`4#$=zq8@wV-sg1{ja6 zF+;!bT30W&MAWo5#51oCU96ZvIoVY6Vd7Qr$JX*znXTRSjO}e(Z0@JIQ*zB(te5(D zLz68}O66tfBMaoXsg1~dq!uey(nevn;i8EldlQnmYUmKcs;?zcRuTBi`9#+tWBlp* zq{=2UIjzz_CzrIy%Ywe4%G?o4daufHYW$(q)K>;gxMhf4Mk>U+S*Iro^gp6ZE}g!|HMa`j#K778a#<%v(DN&kKtTFePCGmj=`1dNXt)3PFJ^+Dp~0MWaF! zU1l;_Ro8K*{)eKhLEjX)Iq`jQ?o=a}vwk#J02I%2I-{jmUjIh%-a$%X58}_{~#Pvu(uJX9vQoXdN`XvR@c# zN~p0VrqkJ<9bg=0q`9#=faZ+=T^}072)1fIPui147}p@zwn4{Y=ja*hXq8U>6g;H4wU;qsTc8axln`DZX8no>7E7d) z>p1_C``F^#b@o6OIk+mCm660%RA=?HTIu93@m85CfJ)ZgX9$gN!=Nkl6_ivJ5)<47Q!N zFxxRm&+Z;c(@`odM{8tdV$Hx6B@JVJKbhJaGKA5uK*@H0<U#6|IlhCd&QBT znT`|Cr?w31p!QK7J8JHJZzlU@zbn4g$o$~L8|lSJy;oJ1AYmW_I8Q;Pun8NzAU{aT z-jSu|mZVm>dFh%2qo51e(7PQ{^V5=b^T6Fq*m;2&(0eFZ(_B7tcgsJc16q^rqJkll zO|Q0kSp?lMXgqq?h_(lzH=uL`itc)*u_|ltzYN=QJLC;#X`X}{KJfQZ1Dh z=H)3@Cu&MSuRET-&Ff*NdAVrz(hh1LC3D+RbD77C?}wbGHX7M#_*m)5ONQKUGAL*s z7S9}qP-Z!)u9E_j^|oD^6B5i#Wsq5CQ1|pt>rXMpa$}$kd>DJLMD?C)Zbx&+=Kk-| zuQGk9hR_qDH8rdK8z}YdCMxyG`(MzUTcuz%&hAwImU@Q0mwvajHbMK(%Cc96nJk|D ziug5uzbwDG<%bQ3>iy=RuPHLdq5w0R7A0Z_-e>WvACrw2+AE7!6KB|v;nE^f3&9T*jduP3p;!A-?D9DjlJ0iQ67GbPFGB^ zKvMX&{I|O8)<&C?rl(Twunck@?=8Jpi@=;a-P^ZVRa)Tw=11a$pka#)trgn~-*WZ4rG}Ed#5Wkn>?#Dch)D0ZBkqJtT|*yBP1HOnLIta ztZ9Jp;~57Ve(opC%Uw-lARZF;_SiEg{!2)!zd_rLMC!*q4N z1X{wnrAgB;jxC}#yv&4kfzm~87d0*4iju%ri zCfA)OC*!=3N0jopvnemHY~olYD{LGl(%Bj1rjyD8cT@grrQ62sqNHVLPC3$6(sH(- 
z1lJ>Y5_%1j(9}};UpwsA&~qLC?lPSK-9TEDg7m`mh2e8{yj-oA_wos~kUI=~sW-HD zFr>D2$L1=`5h@ijE>zO}269pI$LZzQ^ zjzVLWTCO;;R+fVx4zwj)p2JF9lr{LPB$ zkE_99Hz5|YT53@=MYU4@ZRv*O~_RNr%2SSQu!l95Q zl2?W=#Jt^i$eI}kLvkd^GsdjNnjvQ7U8F>{w?V*N@MXxM z1Q%AL2ZdOPZee%M6^Fa9b_xv&e>nT;%i(oM1#RnT{!dRkWQ=<|Hs;>G7Q<>=PuM2d zj5fKgEjK^+EwncQ+VxtOcB`&Z=LmkZ@W`;DsAG6B_E%(rsaZ-Qjb%xxR5N3SPXbAmV+v`nbeb>m$y7*Mg8>fY&9z{91qUdM8 zV)Xi}!BbL-Z4J=twjLVfl_)3FWfjJPw4@YczlEFjVWyDOtvXv%%`U8iDXjM9!mo8t z7G90IZv%F3s9)Khcwt+NNlQ+}G_fVky%>7tIKSX_2REBW!2wM{|>H zLT;>15Z_X*aXR@_-JpSwW__MARt+6`&iIoHbmt_SB*iF6>DsUCVVLiveumnou{K?A zgvXF%`PhL|b%Q0r$X}Oky)FQyWR5A2{aQCTbqL$J*^B!;#lhMz^YTednDMQ`{KB?E zx!q`=U_W7x_N-re*&b0(z^)imMzy7}bkOjjYs!^PZ$Hr<$$ryd)JL9AyjrfTG?p_H zlmoUWUL3ooa`ObHqTiD-Zt1?qP)DVgctsAk#3}T9IB?pWm)ei$QlR`w|_BT=OlL7K>`vaF{u)ly63~0~1Z5t}ua-!`d*s?|qiN5S^tIovH9+XB)XR!J@ zkF+4vNNb^Yjg-Y)FZmj-`@!{xm#ebEj4xug8nQc=emCY<=Foke*pGECc!##y`Bn=w zMI}7s$8i0OZ%?|caEAz;L~P!uHyhCNNr=!bk!@*W*)aBi%Q}flqkd`>l}7z0W;fif zBiMPET@DgkDBoI2#ZKMIK1a0Z4`2=JexsGV8%=2mVyz@DdI!eSKHN$_*7z{=jU3fk zp+AbpE@0MZEd_)PZrsxRd7L9XW^dV!--nJx!P>7I#2kZzL$OamLAe0*~ZXC`b`S6wf z%AtFmo*7_l4=PP_!qQumv4hLj{JP<9?VrVcV0<(E*H^DF#`eI1w0xWvv6r@DA7z$X z4zqy~@1oyD${!{f+lRrGDa&pDW_oyf347*H1HyA`bNl7(Ueg|E4?#+X@a|Cb&|ro2 zDe&#%N=5o1c1dtadS6-J5P?OBm3bsRLk9URab#UBXUOsd0Xl#f&g#5@wL)5ccsbn> zuu{38K(F+|THb~NiubeLF6hnG{Mz~0vE>^Ia85@Y^)4)O5RxH-+e44MK72tz8BAV- zsOjutD*qXIfdfIZrsDS`x5$;D#s_h#C0X~yb?MUlj#3~6I0i%jyMP724?rNW0hk7y z1%?4{1J45206Aa+(tuhZ8mIym0+)gQz*gW<;2U5R@ILT7@DHFj@DlJx;BUYKz-z!0 zz>h!>uo0LEd;uf^2Y}~*UjPEE1ttS$fFZz}z|+7_Kq#;smuSEzpKov1e@wy~Hq(&WaVms5OF;?w)e ztyS8S0$IlWP*~WBepJhk*YjJX6Y^V1ffV2v5CQB0763m0fxre}8gLdE2D}YC3tR)_ zfC)$gYJq5=3RnnS2KobAfk%OFfKkBv!1KUAfZo7Mz#oCX0S^GL0Z#xw0ztq=U?%Vd zkO&+Ao&$aX2(T8I44eUm0B-_M13v+wz;<9Ra2^-~dx|3oL>(vC9o z>>{Nuuf?~M&lrnRBhN}H7&GBe`3m2za_RVkgOj%vJH5(Zx}qsOW*=)Wu`~8v&?(hj zrmXf!Cwa2mvH;~Mza_tghB<1?fs2NgQCaCN&ldckyU{t8TY_-IxJ|(?UH^!gfRm;E zOm|ojt-U#hjca+3iEDWSSOl~HL-@FsH-V>tpZI3keIrlCNHIq^PNjqd{k9+VzqPhITyxL9&e>`OfM*YB~v 
z?y>iSJx#5j=Xtd!hN`T4zs_opi;4GyG|^*!`X2k!9{a)f*bnyD@4d%9RIctJF1M6t+v} zBD2m~5hD7H`aZ-euj$-Eg{qa&R zjDyYz%QC+{3Pb%rwkNs{9(}!8({YGRN1sN@<@?31VOnI(%9gx1O^fN-+I&-f$+qa` zLu_7a?`9f8sb$O=CL>)Q%PQp~Sc5#C)u2D3gC4V#)`!f8up;!2$BxdqdOd#U*Uhva zA>m3|HRzwb16LiudXso(C7gSQBkjBV9riB*-DWo1_ z%!(^mG%A7cOcW9Efr^jT*R}z{VijwA2Ad&DLqsD)R9aC_wbuAZ)JB6u(~gKQuz@t# zM2*sY4^XY8S!MM{SFdZ&xp#Z#yWe-`ockE&?)}o-8~j`1NteUXO-;{wZAm;ARvCM2 zXy}){rzbpK8GDrQUH-k%qD!eJcey|qBG%3I1(np-xngC=ueC!+=;;j5U%kMJqah`F~XgA zg0Lq%iDY6%0mK5L2j_#>4kMh2u7n%mMR*WqU*K zA~8lTQe~QbbX(frKz4+^h?(djHpo0wiDFfzcEZRk6UQ)IG^3u~e4gB>vh>!na07is zAMB$tPd8blW@$Et29LI6F zMxD@XNAmT>YV7Bp%wu%Pd~&2}W;ekjCPA$aR< zg;@?%^0pn00&@_d8%Bq?N_ma?@kv3 z;EG(_Wxh2!ZXm0;CN41Ftw@kJ)Jn~Iu?l~>Y=ol#X>x?B*6e9kk&m^=p-R(Axm#_~ zY_k}N7%ay%IO7AkS30LxPz&f zf!*kadUZ^*?dRJ*Yho4GS#2CPaSS=ByE-!qtEfd8^ZgVxa=fzXxc;ILH}O8+WzJjW zj@CW7msQkotX08wGLJ=SU59fR3hI+GJ(Gmk zXQLJ@+ZZknV*5PXP_(q16~5$nc(7ubtdk$f^#+HHLP&`)&K1(vq|saaL^?HRH;r&S zB7)qD0#od+V6ENw#yPx#FwVBY9>u&)nMmsy&yY7m#)X^>85B~UKQ;KK$=6h1iZ@*h zj_|4s%jHp;5gFfjPurBlW%?db;8Y4rR>f6IBwL6Yi@_7gqCvlhv;TfFu2cSTt~7By z2(1+awhYG5#$*wWB2d_i0G?Fa1hFXc^yj3v{Tp@;I2Emoj*VRI^5x93foE1fcImakUcVb=_vmzg@qr&D z{>tx*=}&$9 zoq$8n*?+sRG9tdf_vX}oCnHz5WtI;*JI=Fq;?}_DvmC#v2sj6CQ*j*ug{yTZ`p^HdQH?Gl)ha TnXnNL<(8NpVaS{P-%Ni8?M4Cp diff --git a/Roland_A_PRO/Preset.syx b/Roland_A_PRO/Preset.syx deleted file mode 100644 index 284fd5efca757f303afdd0650981f0e32fa1b16a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6345 zcmcK8TS`Mg30^B^UGWCTGtoPJF* zGl|Fd9?{2uhD21W(D@&JhNvJ)4Su-$`gbZ;lVCEK#jGyioWdoQy1EvU>D6?Dp-hz>d z&pk4AMCQSQ327l}z$`47km;ZV=Fx%)8RDwIEG?Lj@wNbFWx<5ZO$T6}ESQkF=^L1} z|IKH7<0mj1ZDjZ(r0w)SXR_V2HDD;DX0kLr^S4!vD#qaJA73F&J4<9jT8J!7d!nj7 mGX51ql-*4SN=2%qRw~Psx~Ws)f3T){d67bz@Bfnbb@~A;cZ>G` diff --git a/microKONTROL/Preset.syx b/microKONTROL/Preset.syx deleted file mode 100644 index f42a2f68f81a1be61f00eb8ba6a6db57db4ae4c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 
HcmV?d00001 literal 103 zcmez1>|$ZSU}9#)z`(%(1i1{1ObiTp5)Ax&-vM4q4|D(k