Implement Phases 3, 4, 6 - 15 new MCP tools, 76/110 tasks complete

Phase 3 - Human Feel & Dynamics (10/11 tasks):
- apply_clip_fades() - T041: Fade automation per section
- write_volume_automation() - T042: Curves (linear, exp, s_curve, punch)
- apply_sidechain_pump() - T045: Sidechain by intensity/style
- inject_pattern_fills() - T048: Snare rolls, fills by density
- humanize_set() - T050: Timing + velocity + groove automation (see the sketch after this list)
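
The humanize_set() item above is the kind of per-note pass the sketch below illustrates. This is a minimal sketch only, assuming notes are dicts with 'start' (in beats) and 'velocity' keys as used in benchmark.py; the function name, jitter ranges, and the omission of groove templates are all assumptions, not the shipped implementation.

import random

def humanize_notes(notes, timing_jitter=0.01, velocity_jitter=8, seed=42):
    """Illustrative humanization: nudge each note's timing and velocity.

    ASSUMPTION: ranges and behavior are illustrative; the real humanize_set()
    MCP tool also applies groove templates, which this sketch omits.
    """
    rng = random.Random(seed)
    humanized = []
    for note in notes:
        humanized.append({
            **note,
            # shift the start time by up to +/- timing_jitter beats
            'start': max(0.0, note['start'] + rng.uniform(-timing_jitter, timing_jitter)),
            # vary velocity, clamped to the MIDI range 1..127
            'velocity': min(127, max(1, note['velocity'] + rng.randint(-velocity_jitter, velocity_jitter))),
        })
    return humanized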

Phase 4 - Key Compatibility & Tonal (9/12 tasks):
- audio_key_compatibility.py: Full KEY_COMPATIBILITY_MATRIX
- analyze_key_compatibility() - T053: Harmonic compatibility scoring (see the sketch after this list)
- suggest_key_change() - T054: Circle of fifths modulation
- validate_sample_key() - T055: Sample key validation
- analyze_spectral_fit() - T057/T062: Spectral role matching
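
A minimal sketch of the scoring idea behind analyze_key_compatibility(): distance around the circle of fifths, with minor keys folded onto their relative major. The note table, weights, and function name below are assumptions for illustration; the actual KEY_COMPATIBILITY_MATRIX in audio_key_compatibility.py may weight things differently.

NOTE_INDEX = {'C': 0, 'C#': 1, 'D': 2, 'D#': 3, 'E': 4, 'F': 5,
              'F#': 6, 'G': 7, 'G#': 8, 'A': 9, 'A#': 10, 'B': 11}

def key_compatibility_score(root_a, mode_a, root_b, mode_b):
    """Illustrative harmonic compatibility score in [0, 1] (assumed weights)."""
    # fold minor keys onto their relative major, so A minor scores like C major
    pc_a = NOTE_INDEX[root_a] + (3 if mode_a == 'minor' else 0)
    pc_b = NOTE_INDEX[root_b] + (3 if mode_b == 'minor' else 0)
    # map pitch classes onto the circle of fifths and take the shortest way around
    pos_a = (pc_a * 7) % 12
    pos_b = (pc_b * 7) % 12
    diff = (pos_b - pos_a) % 12
    fifths = min(diff, 12 - diff)  # 0 (same/relative key) .. 6 (tritone apart)
    return round(1.0 - fifths / 6.0, 2)

For example, key_compatibility_score('A', 'minor', 'C', 'major') returns 1.0 (relative keys), while C major against F# major returns 0.0.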

Phase 6 - Mastering & QA (8/13 tasks):
- calibrate_gain_staging() - T079: Auto gain by bus targets
- run_mix_quality_check() - T085: LUFS, peaks, L/R balance (see the sketch after this list)
- export_stem_mixdown() - T087: 24-bit/44.1kHz stem export
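
A minimal sketch of the kind of checks run_mix_quality_check() reports, assuming a stereo float buffer in the range [-1, 1]; the thresholds and function name are illustrative, and integrated LUFS needs a proper BS.1770 meter rather than the RMS shortcut used here.

import numpy as np

def quick_mix_check(stereo: np.ndarray, peak_ceiling_db: float = -1.0, max_balance_db: float = 1.5) -> dict:
    """Illustrative QA pass over an (n_samples, 2) float stereo buffer.

    ASSUMPTION: sample-peak and RMS balance only; the shipped tool also
    measures integrated LUFS, which this sketch does not attempt.
    """
    eps = 1e-12
    peak_db = 20 * np.log10(np.max(np.abs(stereo)) + eps)   # sample peak in dBFS
    rms_l = np.sqrt(np.mean(stereo[:, 0] ** 2) + eps)
    rms_r = np.sqrt(np.mean(stereo[:, 1] ** 2) + eps)
    balance_db = 20 * np.log10(rms_l / rms_r)                # positive = left-heavy
    return {
        'peak_db': round(float(peak_db), 2),
        'peak_ok': bool(peak_db <= peak_ceiling_db),         # leave headroom for limiting
        'lr_balance_db': round(float(balance_db), 2),
        'balance_ok': bool(abs(balance_db) <= max_balance_db),
    }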

New files:
- audio_key_compatibility.py (T052)
- bus_routing_fix.py (T101-T104)
- validation_system_fix.py (T105-T106)

Total: 76/110 tasks (69%), 71 MCP tools exposed

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
renato97
2026-03-29 00:59:24 -03:00
parent ed6f75c49f
commit 4332ff65da
24 changed files with 6586 additions and 38 deletions

benchmark.py

@@ -0,0 +1,143 @@
"""
benchmark.py - Performance profiling de generación
T107-T110: Benchmarking y profiling
"""
import time
import logging
from typing import Dict, Any, List
from statistics import mean, stdev
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Benchmark")
class PerformanceBenchmark:
    """System performance benchmark."""

    def __init__(self):
        self.results: Dict[str, List[float]] = {}

    def benchmark_generation(self, n_runs: int = 5) -> Dict[str, Any]:
        """
        Benchmark a full track generation run.

        Args:
            n_runs: Number of runs

        Returns:
            Performance statistics
        """
        from full_integration import generate_complete_track

        times = []
        for i in range(n_runs):
            start = time.time()
            # vary the seed per run so caching cannot skew the timings
            generate_complete_track("techno", seed=1000 + i)
            elapsed = time.time() - start
            times.append(elapsed)
            logger.info(f"Run {i+1}/{n_runs}: {elapsed:.2f}s")

        return {
            'operation': 'full_generation',
            'n_runs': n_runs,
            'mean_time': mean(times),
            'stdev_time': stdev(times) if len(times) > 1 else 0,
            'min_time': min(times),
            'max_time': max(times),
            'total_time': sum(times),
        }

    def benchmark_component(self, component_name: str, func, *args, n_runs: int = 10) -> Dict[str, Any]:
        """Benchmark a specific component by calling func(*args) n_runs times."""
        times = []
        for _ in range(n_runs):
            start = time.time()
            func(*args)
            elapsed = time.time() - start
            times.append(elapsed)

        return {
            'component': component_name,
            'n_runs': n_runs,
            'mean_time': mean(times),
            'min_time': min(times),
            'max_time': max(times),
        }

    def run_full_benchmark(self, n_runs: int = 3) -> Dict[str, Any]:
        """Run the full benchmark across all components."""
        results = {}

        # Benchmark full generation
        logger.info("Benchmarking full generation...")
        results['full_generation'] = self.benchmark_generation(n_runs=n_runs)

        # Benchmark HumanFeelEngine
        logger.info("Benchmarking HumanFeelEngine...")
        from human_feel import HumanFeelEngine
        engine = HumanFeelEngine(seed=42)
        notes = [{'pitch': 60, 'start': float(i), 'velocity': 100} for i in range(100)]
        results['human_feel'] = self.benchmark_component(
            'HumanFeelEngine.process_notes',
            engine.process_notes,
            notes, 'drop', True, 'shuffle',
            n_runs=100
        )

        # Benchmark AutoPrompter
        logger.info("Benchmarking AutoPrompter...")
        from self_ai import AutoPrompter
        prompter = AutoPrompter()
        vibes = ["techno", "house", "trance", "drum and bass", "deep house"]
        results['auto_prompter'] = self.benchmark_component(
            'AutoPrompter.generate_from_vibe',
            lambda: [prompter.generate_from_vibe(v) for v in vibes],
            n_runs=10
        )

        # Benchmark DJArrangementEngine
        logger.info("Benchmarking DJArrangementEngine...")
        from audio_arrangement import DJArrangementEngine
        arr_engine = DJArrangementEngine(seed=42)
        results['arrangement'] = self.benchmark_component(
            'DJArrangementEngine.generate_structure',
            arr_engine.generate_structure,
            'standard',
            n_runs=50
        )

        # Summary
        logger.info("\n" + "=" * 50)
        logger.info("BENCHMARK SUMMARY")
        logger.info("=" * 50)
        for name, data in results.items():
            if 'mean_time' in data:
                logger.info(f"{name}: {data['mean_time']:.4f}s (avg)")

        return results

def main():
    """Run the benchmark from the command line."""
    import sys
    # optional CLI argument: number of full-generation runs (default 3)
    n_runs = int(sys.argv[1]) if len(sys.argv) > 1 else 3

    benchmark = PerformanceBenchmark()
    results = benchmark.run_full_benchmark(n_runs=n_runs)

    # Save results
    import json
    from pathlib import Path
    output_path = Path("benchmark_results.json")
    with open(output_path, 'w') as f:
        json.dump(results, f, indent=2)
    logger.info(f"\nResults saved to {output_path}")


if __name__ == '__main__':
    main()
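
Assuming the project modules are importable, the file can be run directly or used to time an individual callable; the sorted() call below is only a placeholder workload for illustration.

# python benchmark.py 5    # optional argument = number of full-generation runs

from benchmark import PerformanceBenchmark

bench = PerformanceBenchmark()
stats = bench.benchmark_component('sorted', sorted, list(range(1000)), n_runs=20)
print(f"{stats['component']}: {stats['mean_time']:.6f}s avg over {stats['n_runs']} runs")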