"""
Harmony Engine - Advanced musical intelligence engine for AbletonMCP_AI.

This module provides sophisticated musical analysis, harmony generation,
intelligent loop variation, advanced sample manipulation, and comparison
against professional references.

Main classes:
- ProjectAnalyzer: key, energy, and section-balance analysis
- CounterMelodyGenerator: counter-melody and harmony generation
- VariationEngine: intelligent variation of loops and sections
- SampleIntelligence: advanced sample manipulation
- ReferenceMatcher: comparison and adaptation to references

Implemented tasks:
- Part 1 (T041-T045): Analysis and Adaptation
- Part 2 (T046-T050): Intelligent Variation
- Part 3 (T051-T055): Intelligent Samples
- Part 4 (T056-T060): Reference and Comparison
"""

import json
import logging
import os
import random
from collections import Counter
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

logger = logging.getLogger("HarmonyEngine")


# =============================================================================
# DATACLASSES - Musical Profiles and Metrics
# =============================================================================

@dataclass
class EnergyCurve:
    """Energy profile across a song or section.

    Attributes:
        bars: Bar positions at which energy was measured
        levels: Energy levels (0.0-1.0) at each position
        section_names: Names of the corresponding sections
    """
    bars: List[int] = field(default_factory=list)
    levels: List[float] = field(default_factory=list)
    section_names: List[str] = field(default_factory=list)

    def get_level_at(self, bar: int) -> float:
        """Returns the energy level at a specific bar."""
        if not self.bars:
            return 0.5
        closest_idx = min(range(len(self.bars)), key=lambda i: abs(self.bars[i] - bar))
        return self.levels[closest_idx] if closest_idx < len(self.levels) else 0.5

    def get_average(self, start_bar: int, end_bar: int) -> float:
        """Computes the average energy between two bars."""
        relevant = [l for b, l in zip(self.bars, self.levels) if start_bar <= b <= end_bar]
        return float(np.mean(relevant)) if relevant else 0.5

    def get_peak_level(self) -> float:
        """Returns the maximum energy level."""
        return max(self.levels) if self.levels else 0.0

    def get_trough_level(self) -> float:
        """Returns the minimum energy level."""
        return min(self.levels) if self.levels else 0.0

    def to_dict(self) -> Dict[str, Any]:
        return {
            "bars": self.bars,
            "levels": self.levels,
            "section_names": self.section_names,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "EnergyCurve":
        return cls(
            bars=data.get("bars", []),
            levels=data.get("levels", []),
            section_names=data.get("section_names", []),
        )
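

# Illustrative usage sketch (not part of the original API): how an EnergyCurve
# built from a few measured points can be queried. The values are made up.
def _example_energy_curve_usage() -> None:
    curve = EnergyCurve(
        bars=[0, 8, 16, 24],
        levels=[0.3, 0.6, 1.0, 0.4],
        section_names=["Intro", "Build", "Drop", "Break"],
    )
    # The nearest measured bar wins: bar 10 falls back to the bar-8 reading.
    assert curve.get_level_at(10) == 0.6
    # Average over the first half of the arrangement (bars 0-16 inclusive).
    print(round(curve.get_average(0, 16), 2))                 # ~0.63
    print(curve.get_peak_level(), curve.get_trough_level())   # 1.0 0.4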


@dataclass
class SpectrumProfile:
    """Spectral profile with per-band frequencies and magnitudes.

    Attributes:
        frequencies: List of frequencies in Hz
        magnitudes: List of magnitudes in dB
        low_energy: Energy in the low band (20-250 Hz)
        low_mid_energy: Energy in the low-mid band (250-500 Hz)
        mid_energy: Energy in the mid band (500-2000 Hz)
        high_mid_energy: Energy in the high-mid band (2000-4000 Hz)
        high_energy: Energy in the high band (4000-20000 Hz)
    """
    frequencies: List[float] = field(default_factory=list)
    magnitudes: List[float] = field(default_factory=list)
    low_energy: float = 0.0
    low_mid_energy: float = 0.0
    mid_energy: float = 0.0
    high_mid_energy: float = 0.0
    high_energy: float = 0.0

    def get_balance_score(self) -> float:
        """Returns a spectral-balance score (0.0-1.0)."""
        energies = [self.low_energy, self.low_mid_energy, self.mid_energy,
                    self.high_mid_energy, self.high_energy]
        if not any(energies):
            return 0.5
        ideal = [0.25, 0.15, 0.25, 0.20, 0.15]
        total = sum(energies)
        normalized = [e / total for e in energies]
        deviation = sum(abs(n - i) for n, i in zip(normalized, ideal))
        return max(0.0, 1.0 - deviation)

    def get_dominant_frequency_range(self) -> str:
        """Determines the dominant frequency range."""
        energies = {
            "low": self.low_energy,
            "low_mid": self.low_mid_energy,
            "mid": self.mid_energy,
            "high_mid": self.high_mid_energy,
            "high": self.high_energy,
        }
        return max(energies.items(), key=lambda x: x[1])[0]

    def to_dict(self) -> Dict[str, Any]:
        return {
            "frequencies": self.frequencies,
            "magnitudes": self.magnitudes,
            "low_energy": self.low_energy,
            "low_mid_energy": self.low_mid_energy,
            "mid_energy": self.mid_energy,
            "high_mid_energy": self.high_mid_energy,
            "high_energy": self.high_energy,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SpectrumProfile":
        return cls(
            frequencies=data.get("frequencies", []),
            magnitudes=data.get("magnitudes", []),
            low_energy=data.get("low_energy", 0.0),
            low_mid_energy=data.get("low_mid_energy", 0.0),
            mid_energy=data.get("mid_energy", 0.0),
            high_mid_energy=data.get("high_mid_energy", 0.0),
            high_energy=data.get("high_energy", 0.0),
        )


@dataclass
class StereoWidth:
    """Stereo width per frequency band.

    Attributes:
        low: Width in the low band, 20-250 Hz (ideally mono)
        mid_low: Width in the 250-500 Hz range
        mid: Width in the 500-2000 Hz range
        high: Width in the high band, 2000+ Hz (ideally wide)
        overall_width: Average overall stereo width
    """
    low: float = 0.0
    mid_low: float = 0.0
    mid: float = 0.0
    high: float = 0.0
    overall_width: float = 0.0

    def is_balanced(self) -> bool:
        """Checks whether the stereo width is balanced."""
        return self.low <= 0.3 and self.high >= 0.5

    def get_recommendations(self) -> List[str]:
        """Generates stereo-width adjustment recommendations."""
        recs = []
        if self.low > 0.3:
            recs.append("Reduce stereo width in the low frequencies (<250Hz) to avoid phase conflicts")
        if self.high < 0.5:
            recs.append("Increase stereo width in the high frequencies (>2kHz) for more ambience")
        if self.mid < 0.3:
            recs.append("Consider widening the mid range for the main elements")
        return recs

    def to_dict(self) -> Dict[str, Any]:
        return {
            "low": self.low,
            "mid_low": self.mid_low,
            "mid": self.mid,
            "high": self.high,
            "overall_width": self.overall_width,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "StereoWidth":
        return cls(
            low=data.get("low", 0.0),
            mid_low=data.get("mid_low", 0.0),
            mid=data.get("mid", 0.0),
            high=data.get("high", 0.0),
            overall_width=data.get("overall_width", 0.0),
        )


@dataclass
class SimilarityScore:
    """Multidimensional similarity score between projects.

    Attributes:
        bpm_score: BPM similarity (0.0-1.0)
        key_score: Key similarity (0.0-1.0)
        energy_score: Energy-curve similarity (0.0-1.0)
        spectrum_score: Spectrum similarity (0.0-1.0)
        width_score: Stereo-width similarity (0.0-1.0)
        *_weight: Weights used to compute the total score
    """
    bpm_score: float = 0.0
    key_score: float = 0.0
    energy_score: float = 0.0
    spectrum_score: float = 0.0
    width_score: float = 0.0
    bpm_weight: float = 0.20
    key_weight: float = 0.15
    energy_weight: float = 0.25
    spectrum_weight: float = 0.25
    width_weight: float = 0.15

    @property
    def total(self) -> float:
        """Computes the weighted total score."""
        total_weight = sum([self.bpm_weight, self.key_weight, self.energy_weight,
                            self.spectrum_weight, self.width_weight])
        if total_weight == 0:
            return 0.0
        score = (
            self.bpm_score * self.bpm_weight +
            self.key_score * self.key_weight +
            self.energy_score * self.energy_weight +
            self.spectrum_score * self.spectrum_weight +
            self.width_score * self.width_weight
        ) / total_weight
        return round(score, 3)

    def to_dict(self) -> Dict[str, Any]:
        return {
            "bpm_score": self.bpm_score,
            "key_score": self.key_score,
            "energy_score": self.energy_score,
            "spectrum_score": self.spectrum_score,
            "width_score": self.width_score,
            "total": self.total,
            "weights": {
                "bpm": self.bpm_weight,
                "key": self.key_weight,
                "energy": self.energy_weight,
                "spectrum": self.spectrum_weight,
                "width": self.width_weight,
            }
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SimilarityScore":
        weights = data.get("weights", {})
        return cls(
            bpm_score=data.get("bpm_score", 0.0),
            key_score=data.get("key_score", 0.0),
            energy_score=data.get("energy_score", 0.0),
            spectrum_score=data.get("spectrum_score", 0.0),
            width_score=data.get("width_score", 0.0),
            bpm_weight=weights.get("bpm", 0.20),
            key_weight=weights.get("key", 0.15),
            energy_weight=weights.get("energy", 0.25),
            spectrum_weight=weights.get("spectrum", 0.25),
            width_weight=weights.get("width", 0.15),
        )
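

# Minimal sketch of how the weighted total behaves; the numbers are arbitrary
# and only illustrate the default weights defined above.
def _example_similarity_total() -> None:
    score = SimilarityScore(
        bpm_score=1.0, key_score=0.5, energy_score=0.8,
        spectrum_score=0.6, width_score=0.9,
    )
    # total = (1.0*0.20 + 0.5*0.15 + 0.8*0.25 + 0.6*0.25 + 0.9*0.15) / 1.0 = 0.76
    print(score.total)               # 0.76
    print(score.to_dict()["total"])  # same value, as serialized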


# =============================================================================
# PART 1 - Analysis and Adaptation (T041-T045)
# =============================================================================

class ProjectAnalyzer:
    """
    Analyzes musical projects to extract key information.

    Methods:
    - T041: analyze_project_key() - Detects the predominant key from MIDI notes
    - T042: harmonize_track() - Generates harmonized notes from a progression
    - T043: detect_energy_curve() - Maps the song's energy curve
    - T044: balance_sections() - Adjusts energy across sections
    """

    NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
    KEY_PROFILES = {
        'C': [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
        'G': [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
        'D': [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
        'A': [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
        'E': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
        'Am': [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
        'Em': [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        'Dm': [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
        'Gm': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
        'Cm': [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
    }

    def analyze_project_key(self, tracks: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        T041: Analyzes MIDI notes across multiple tracks and detects the predominant key.

        Args:
            tracks: List of tracks with MIDI note information

        Returns:
            Dict with detected key, confidence, alternative keys, and note distribution
        """
        all_notes = []
        for track in tracks:
            if 'notes' in track:
                all_notes.extend(track['notes'])
            elif 'clips' in track:
                for clip in track['clips']:
                    if 'notes' in clip:
                        all_notes.extend(clip['notes'])

        if not all_notes:
            return {"key": "Am", "confidence": 0.0, "alternative_keys": [],
                    "note_distribution": {}, "scale_type": "minor"}

        pitches = [n['pitch'] % 12 for n in all_notes if 'pitch' in n]
        if not pitches:
            return {"key": "Am", "confidence": 0.0, "alternative_keys": [],
                    "note_distribution": {}, "scale_type": "minor"}

        chroma_counts = Counter(pitches)
        total = len(pitches)
        distribution = [chroma_counts.get(i, 0) / total for i in range(12)]

        best_key, best_score = None, -1
        scores = {}
        for key_name, profile in self.KEY_PROFILES.items():
            correlation = np.corrcoef(distribution, profile)[0, 1]
            if np.isnan(correlation):
                correlation = 0.0
            scores[key_name] = correlation
            if correlation > best_score:
                best_score, best_key = correlation, key_name

        alt_keys = sorted(scores.items(), key=lambda x: x[1], reverse=True)[1:4]
        scale_type = "major" if len(best_key) == 1 or best_key[-1] != 'm' else "minor"

        return {
            "key": best_key,
            "confidence": round(best_score, 3),
            "alternative_keys": [{"key": k, "confidence": round(s, 3)} for k, s in alt_keys],
            "note_distribution": {self.NOTE_NAMES[i]: round(chroma_counts.get(i, 0) / total, 3) for i in range(12)},
            "scale_type": scale_type,
            "total_notes_analyzed": total,
        }

    def harmonize_track(self, track_index: int, chord_progression: List[str],
                        harmony_level: str = "triads") -> Dict[str, Any]:
        """
        T042: Generates harmonized notes for a track from a chord progression.

        Args:
            track_index: Index of the track to harmonize
            chord_progression: List of chords (e.g., ['Am', 'F', 'C', 'G'])
            harmony_level: Harmony level ('triads', 'sevenths', 'extended')

        Returns:
            Dict with the generated notes and configuration
        """
        # Chord tones expressed as absolute pitch classes (C=0 ... B=11).
        chord_structures = {
            'Am': [9, 0, 4], 'Dm': [2, 5, 9], 'Em': [4, 7, 11],
            'Gm': [7, 10, 2], 'Bm': [11, 2, 6],
            'C': [0, 4, 7], 'F': [5, 9, 0], 'G': [7, 11, 2],
            'D': [2, 6, 9], 'A': [9, 1, 4], 'E': [4, 8, 11],
        }
        seventh_extensions = {
            'Am': 7, 'Dm': 0, 'Em': 2, 'Gm': 5, 'Bm': 9,
            'C': 11, 'F': 4, 'G': 6, 'D': 1, 'A': 8, 'E': 3,
        }

        generated_notes = []
        for bar_idx, chord in enumerate(chord_progression):
            if chord not in chord_structures:
                continue
            base_notes = chord_structures[chord][:]
            if harmony_level in ('sevenths', 'extended') and chord in seventh_extensions:
                base_notes.append(seventh_extensions[chord])

            for note_offset in base_notes:
                # Place the chord tone in the octave A3-G#4 (MIDI 57-68).
                pitch = 57 + ((note_offset - 9) % 12)
                generated_notes.append({
                    "pitch": pitch,
                    "start_time": bar_idx * 4.0,
                    "duration": 4.0,
                    "velocity": 80,
                })

        return {
            "track_index": track_index,
            "chord_progression": chord_progression,
            "harmony_level": harmony_level,
            "notes_generated": len(generated_notes),
            "notes": generated_notes,
            "bars_covered": len(chord_progression),
        }

    def detect_energy_curve(self, arrangement: Dict[str, Any]) -> EnergyCurve:
        """
        T043: Detects and maps the arrangement's energy curve.

        Args:
            arrangement: Dict with section and track information

        Returns:
            EnergyCurve with per-bar levels
        """
        sections = arrangement.get('sections', [])
        tracks = arrangement.get('tracks', [])

        if not sections:
            return EnergyCurve(
                bars=list(range(0, 64, 4)),
                levels=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.8, 0.6, 0.9, 1.0, 0.7, 0.5, 0.4, 0.3],
                section_names=['Intro', 'Build 1', 'Build 2', 'Drop A', 'Break', 'Build 3', 'Drop B', 'Outro']
            )

        section_energy = {
            'intro': 0.30, 'verse': 0.40, 'build': 0.60, 'buildup': 0.60,
            'pre-chorus': 0.60, 'drop': 1.00, 'chorus': 0.90, 'hook': 0.90,
            'break': 0.40, 'breakdown': 0.40, 'bridge': 0.50, 'outro': 0.30,
        }

        bars, levels, names, current_bar = [], [], [], 0
        for section in sections:
            name = section.get('name', 'Unknown').lower()
            duration = section.get('duration_bars', 8)
            base_energy = next((v for k, v in section_energy.items() if k in name), 0.5)
            density = section.get('active_tracks', len(tracks)) / max(len(tracks), 1)
            adjusted = base_energy * (0.7 + 0.3 * density)

            bars.append(current_bar)
            levels.append(round(min(1.0, adjusted), 2))
            names.append(name.title())
            current_bar += duration

        return EnergyCurve(bars=bars, levels=levels, section_names=names)

    def balance_sections(self, sections: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        T044: Adjusts the energy levels across sections.

        Args:
            sections: List of sections to balance

        Returns:
            List of sections with adjusted levels
        """
        targets = {
            'intro': 0.30, 'verse': 0.40, 'build': 0.60, 'buildup': 0.60,
            'pre-chorus': 0.60, 'drop': 1.00, 'chorus': 0.90, 'hook': 0.90,
            'break': 0.40, 'breakdown': 0.40, 'bridge': 0.50, 'outro': 0.30,
        }

        balanced = []
        for section in sections:
            name = section.get('name', 'Unknown').lower()
            current = section.get('energy_level', 0.5)
            target = next((v for k, v in targets.items() if k in name), 0.5)
            adjustment = target - current

            suggestions = []
            if adjustment > 0.2:
                suggestions.extend([
                    f"Add {int(adjustment * 100)}% more elements",
                    "Raise the drums' volume"
                ])
            elif adjustment < -0.2:
                suggestions.extend([
                    f"Reduce density by {int(abs(adjustment) * 100)}%",
                    "Lower the pads' volume"
                ])

            balanced.append({
                **section,
                "target_energy": target,
                "current_energy": current,
                "adjustment_needed": round(adjustment, 2),
                "suggested_adjustments": suggestions,
                "is_balanced": abs(adjustment) < 0.15,
            })

        return balanced
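

# Illustrative sketch of how ProjectAnalyzer is meant to be driven. The track
# and arrangement dictionaries are hypothetical; only their shape matters.
def _example_project_analysis() -> None:
    analyzer = ProjectAnalyzer()
    tracks = [
        {"name": "Lead", "notes": [{"pitch": p, "velocity": 100, "start_time": i * 0.5}
                                   for i, p in enumerate([69, 72, 76, 74, 72, 69])]},
    ]
    key_info = analyzer.analyze_project_key(tracks)
    print(key_info["key"], key_info["confidence"], key_info["scale_type"])

    arrangement = {
        "sections": [
            {"name": "Intro", "duration_bars": 8, "active_tracks": 1},
            {"name": "Drop", "duration_bars": 16, "active_tracks": 1},
        ],
        "tracks": tracks,
    }
    curve = analyzer.detect_energy_curve(arrangement)
    print(curve.bars, curve.levels, curve.section_names)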


class CounterMelodyGenerator:
    """
    Generates counter-melodies that complement main melodies.

    T045: generate_counter_melody() - Uses consonant intervals: 3rds, 6ths
    """

    INTERVALS = {
        'third_major': 4, 'third_minor': 3, 'fifth': 7,
        'sixth_major': 9, 'sixth_minor': 8, 'octave': 12, 'fourth': 5,
    }
    MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]
    MINOR_SCALE = [0, 2, 3, 5, 7, 8, 10]

    def generate_counter_melody(self, main_melody_track: Dict[str, Any],
                                harmony_level: str = "thirds") -> Dict[str, Any]:
        """
        T045: Generates a counter-melody based on the main melody.

        Args:
            main_melody_track: Track containing the main melody
            harmony_level: Harmony level ('thirds', 'sixths', 'fifths', 'mixed')

        Returns:
            Dict with the generated counter-melody notes
        """
        notes = main_melody_track.get('notes', [])
        if not notes:
            return {"notes": [], "harmony_level": harmony_level, "status": "empty_source"}

        scale = self._detect_scale(notes)
        key_center = self._detect_key_center(notes)
        counter_notes = []

        for note in notes:
            pitch = note.get('pitch', 60)
            interval = self._select_interval(pitch, scale, harmony_level, key_center)
            counter_pitch = self._quantize_to_scale(pitch + interval, scale, key_center)

            if harmony_level in ('thirds', 'fifths') and counter_pitch > pitch + 4:
                counter_pitch -= 12
            elif harmony_level == 'sixths' and counter_pitch < pitch:
                counter_pitch += 12

            counter_notes.append({
                "pitch": counter_pitch,
                "start_time": note.get('start_time', 0),
                "duration": note.get('duration', 0.25),
                "velocity": int(note.get('velocity', 100) * 0.85),
            })

        return {
            "notes": counter_notes,
            "harmony_level": harmony_level,
            "source_note_count": len(notes),
            "generated_note_count": len(counter_notes),
            "detected_scale": scale,
            "key_center": key_center,
            "status": "success",
        }

    def _detect_scale(self, notes: List[Dict[str, Any]]) -> List[int]:
        """Chooses the major or minor scale that best covers the note set."""
        pitches = [n['pitch'] % 12 for n in notes if 'pitch' in n]
        if not pitches:
            return self.MINOR_SCALE
        counts = Counter(pitches)
        major_score = sum(counts.get(p, 0) for p in self.MAJOR_SCALE)
        minor_score = sum(counts.get(p, 0) for p in self.MINOR_SCALE)
        return self.MAJOR_SCALE if major_score > minor_score else self.MINOR_SCALE

    def _detect_key_center(self, notes: List[Dict[str, Any]]) -> int:
        """Returns the most frequent pitch class as the key center."""
        pitches = [n['pitch'] % 12 for n in notes if 'pitch' in n]
        return Counter(pitches).most_common(1)[0][0] if pitches else 0

    def _select_interval(self, pitch: int, scale: List[int], level: str, key_center: int) -> int:
        """Picks the harmonization interval (in semitones) for a given note."""
        relative = (pitch % 12 - key_center) % 12
        if level == "thirds":
            interval = self.INTERVALS['third_minor'] if 3 in scale else self.INTERVALS['third_major']
            return interval * (-1 if relative in scale[:4] else 1)
        elif level == "sixths":
            return self.INTERVALS['sixth_minor'] if 3 in scale else self.INTERVALS['sixth_major']
        elif level == "fifths":
            return -self.INTERVALS['fifth']
        elif level == "mixed":
            return random.choice([
                self.INTERVALS['third_minor'] if 3 in scale else self.INTERVALS['third_major'],
                self.INTERVALS['sixth_minor'] if 3 in scale else self.INTERVALS['sixth_major'],
                self.INTERVALS['fifth'],
            ])
        return 3

    def _quantize_to_scale(self, pitch: int, scale: List[int], key_center: int) -> int:
        """Snaps a pitch to the nearest degree of the detected scale."""
        relative = (pitch % 12 - key_center) % 12
        if relative in scale:
            return pitch
        distances = [(s, abs(relative - s)) for s in scale]
        distances.extend([(s + 12, abs(relative - (s + 12))) for s in scale])
        distances.extend([(s - 12, abs(relative - (s - 12))) for s in scale])
        closest = min(distances, key=lambda x: x[1])[0]
        return (pitch // 12) * 12 + ((key_center + closest) % 12)
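

# Minimal usage sketch: harmonize a short ascending phrase in thirds.
# The note values are arbitrary and only illustrate the expected input shape.
def _example_counter_melody() -> None:
    generator = CounterMelodyGenerator()
    melody = {"notes": [
        {"pitch": 60, "start_time": 0.0, "duration": 0.5, "velocity": 100},
        {"pitch": 62, "start_time": 0.5, "duration": 0.5, "velocity": 100},
        {"pitch": 64, "start_time": 1.0, "duration": 0.5, "velocity": 100},
    ]}
    result = generator.generate_counter_melody(melody, harmony_level="thirds")
    for src, cnt in zip(melody["notes"], result["notes"]):
        print(src["pitch"], "->", cnt["pitch"])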


# =============================================================================
# PART 2 - Intelligent Variation (T046-T050)
# =============================================================================

class VariationEngine:
    """
    Intelligent variation engine for loops and sections.

    Methods:
    - T046: variate_loop() - Generates a loop variation
    - T047: add_call_and_response() - Call: 2 bars, Response: 2 bars
    - T048: generate_breakdown() - Creates a stripped-down breakdown
    - T049: generate_drop_variation() - Drop A vs Drop B
    - T050: create_outro() - Outro based on the intro, with fade
    """

    def variate_loop(self, loop_clips: List[Dict[str, Any]],
                     variation_intensity: float = 0.5) -> List[Dict[str, Any]]:
        """
        T046: Generates a variation of an existing loop.

        Args:
            loop_clips: List of clips to vary
            variation_intensity: 0.0-1.0 (how drastic the variation is)

        Returns:
            List of varied clips
        """
        varied_clips = []
        techniques = []
        if variation_intensity > 0.2:
            techniques.append('velocity')
        if variation_intensity > 0.4:
            techniques.append('timing')
        if variation_intensity > 0.6:
            techniques.append('octave')
        if variation_intensity > 0.7:
            techniques.append('ornament')
        if variation_intensity > 0.8:
            techniques.append('rests')

        for clip in loop_clips:
            notes = clip.get('notes', [])
            if not notes:
                varied_clips.append(clip)
                continue

            varied_notes = notes[:]
            for technique in techniques:
                varied_notes = self._apply_technique(varied_notes, technique, variation_intensity)

            varied_clips.append({
                **clip,
                "notes": varied_notes,
                "is_variation": True,
                "original_clip": clip.get('name', 'unknown'),
                "variation_intensity": variation_intensity,
                "techniques_applied": techniques,
            })

        return varied_clips

    def _apply_technique(self, notes: List[Dict[str, Any]],
                         technique: str, intensity: float) -> List[Dict[str, Any]]:
        varied = []

        if technique == 'velocity':
            for note in notes:
                vel = note.get('velocity', 100)
                variation = random.uniform(-20, 20) * intensity
                varied.append({**note, "velocity": max(1, min(127, int(vel + variation)))})

        elif technique == 'timing':
            for note in notes:
                start = note.get('start_time', 0)
                varied.append({**note, "start_time": max(0, start + random.uniform(-0.05, 0.05) * intensity)})

        elif technique == 'octave':
            for note in notes:
                if random.random() < intensity * 0.3:
                    pitch = note.get('pitch', 60)
                    varied.append({**note, "pitch": pitch + (12 if random.random() > 0.5 else -12)})
                else:
                    varied.append(note)

        elif technique == 'ornament':
            for note in notes:
                varied.append(note)
                if random.random() < intensity * 0.2:
                    varied.append({
                        "pitch": note.get('pitch', 60) + random.choice([-1, 1, 2]),
                        "start_time": note.get('start_time', 0) - 0.02,
                        "duration": 0.02,
                        "velocity": min(127, int(note.get('velocity', 100) * 0.8)),
                    })

        elif technique == 'rests':
            for note in notes:
                if random.random() > intensity * 0.15:
                    varied.append(note)

        return varied if varied else notes

    def add_call_and_response(self, phrase_track: Dict[str, Any],
                              response_length: int = 2) -> Dict[str, Any]:
        """
        T047: Adds a call-and-response pattern.
        Call: 2 bars, Response: 2 bars

        Args:
            phrase_track: Track containing the main phrase
            response_length: Length of the response in bars

        Returns:
            Dict with the call and response notes
        """
        notes = phrase_track.get('notes', [])
        if not notes:
            return {"call_notes": [], "response_notes": []}

        max_time = max(n.get('start_time', 0) for n in notes)
        mid_point = max_time / 2
        call_notes = [n for n in notes if n.get('start_time', 0) < mid_point]

        transposition = random.choice([-7, -5, -3, 0, 3, 5, 7])
        response_notes = []
        for note in call_notes:
            response_notes.append({
                "pitch": note.get('pitch', 60) + transposition,
                "start_time": note.get('start_time', 0) + mid_point,
                "duration": note.get('duration', 0.25) * random.uniform(0.8, 1.2),
                "velocity": max(1, min(127, int(note.get('velocity', 100) + random.uniform(-15, 15)))),
            })

        return {
            "call_notes": call_notes,
            "response_notes": response_notes,
            "transposition_semitones": transposition,
            "call_bars": 2,
            "response_bars": response_length,
            "pattern": "call_response",
        }

    def generate_breakdown(self, full_sections: List[Dict[str, Any]],
                           intensity: float = 0.3) -> Dict[str, Any]:
        """
        T048: Creates a stripped-down breakdown by reducing elements.

        Args:
            full_sections: Full sections with all their tracks
            intensity: How much to keep (0.3 = 30% of the elements)

        Returns:
            Dict with the generated breakdown section
        """
        if not full_sections:
            return {"tracks": [], "duration_bars": 8, "section_type": "breakdown"}

        priority_roles = ['melody', 'lead', 'vocal', 'pad', 'atmosphere']
        breakdown_tracks = []

        for section in full_sections:
            tracks = sorted(
                section.get('tracks', []),
                key=lambda t: priority_roles.index(t.get('role', '')) if t.get('role', '') in priority_roles else 999
            )
            kept = tracks[:max(1, int(len(tracks) * intensity))]
            breakdown_tracks.extend([self._reduce_track_intensity(t, 0.5) for t in kept])

        return {
            "tracks": breakdown_tracks,
            "duration_bars": 8,
            "section_type": "breakdown",
            "intensity": intensity,
            "tracks_count": len(breakdown_tracks),
            "original_tracks_count": sum(len(s.get('tracks', [])) for s in full_sections),
        }

    def _reduce_track_intensity(self, track: Dict[str, Any], factor: float) -> Dict[str, Any]:
        return {
            **track,
            "notes": [{**n, "velocity": int(n.get('velocity', 100) * factor)} for n in track.get('notes', [])],
            "volume_reduction_factor": factor,
        }

    def generate_drop_variation(self, drop_section: Dict[str, Any],
                                variation_type: str = "alt") -> Dict[str, Any]:
        """
        T049: Generates a drop variation (Drop A vs Drop B).

        Args:
            drop_section: Original drop section
            variation_type: 'alt' for an alternative, 'intense' for a more intense drop

        Returns:
            Dict with the varied drop
        """
        varied_tracks = []

        for track in drop_section.get('tracks', []):
            notes = track.get('notes', [])
            role = track.get('role', '')

            if variation_type == "alt":
                if role in ['drums', 'percussion']:
                    varied_notes = self._alternate_drum_pattern(notes)
                elif role in ['bass', 'sub']:
                    varied_notes = self._invert_bass_line(notes)
                else:
                    varied_notes = notes
            else:
                varied_notes = self._intensify_drums(notes) if role in ['drums', 'percussion'] else notes

            varied_tracks.append({
                **track,
                "notes": varied_notes,
                "is_variation": True,
                "variation_type": variation_type,
            })

        return {
            "tracks": varied_tracks,
            "section_type": f"drop_{variation_type}",
            "duration_bars": drop_section.get('duration_bars', 8),
            "variation_of": drop_section.get('name', 'unknown'),
        }

    def _alternate_drum_pattern(self, notes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        varied = []
        for note in notes:
            if note.get('pitch', 36) in [38, 40] and random.random() < 0.3:
                varied.append({**note, "start_time": note.get('start_time', 0) + 0.5})
            else:
                varied.append(note)
        return varied

    def _invert_bass_line(self, notes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        if not notes:
            return notes
        center = sum(n.get('pitch', 60) for n in notes) / len(notes)
        return [{**note, "pitch": int(2 * center - note.get('pitch', 60))} for note in notes]

    def _intensify_drums(self, notes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        varied = notes[:]
        for note in notes:
            if note.get('pitch', 0) in [38, 40]:
                varied.append({
                    **note,
                    "start_time": note.get('start_time', 0) + 0.25,
                    "velocity": 40,
                    "is_ghost": True,
                })
        return varied

    def create_outro(self, intro_section: Dict[str, Any],
                     fade_duration: int = 8) -> Dict[str, Any]:
        """
        T050: Creates an outro based on the intro, with a fade-out.

        Args:
            intro_section: Intro section to use as the base
            fade_duration: Fade duration in bars

        Returns:
            Dict with the generated outro section
        """
        outro_tracks = []

        for track in intro_section.get('tracks', []):
            faded_notes = []
            for note in track.get('notes', []):
                fade_factor = max(0.0, 1.0 - (note.get('start_time', 0) / (fade_duration * 4)))
                faded_notes.append({**note, "velocity": int(note.get('velocity', 100) * fade_factor)})
            outro_tracks.append({**track, "notes": faded_notes, "has_fade": True})

        return {
            "tracks": outro_tracks,
            "section_type": "outro",
            "duration_bars": fade_duration,
            "based_on": "intro",
            "fade_duration": fade_duration,
        }
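

# Hypothetical usage sketch: apply a moderate variation pass to a single clip.
# With intensity 0.5 only the 'velocity' and 'timing' techniques are enabled.
def _example_loop_variation() -> None:
    engine = VariationEngine()
    clip = {"name": "bass_loop", "notes": [
        {"pitch": 45, "start_time": float(i), "duration": 0.5, "velocity": 100}
        for i in range(4)
    ]}
    varied = engine.variate_loop([clip], variation_intensity=0.5)[0]
    print(varied["techniques_applied"])      # ['velocity', 'timing']
    print(len(varied["notes"]), varied["is_variation"])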


# =============================================================================
# PART 3 - Intelligent Samples (T051-T055)
# =============================================================================

class SampleIntelligence:
    """
    Advanced intelligence for sample manipulation.

    Methods:
    - T051: find_and_replace_sample() - Finds a similar alternative
    - T052: layer_samples() - Layers 2+ samples
    - T053: create_sample_chain() - Chains samples
    - T054: generate_from_sample() - Generates a song based on a sample
    - T055: create_vocal_chops() - Creates chops mapped to a Drum Rack
    """

    def __init__(self, library_path: Optional[str] = None):
        self.library_path = library_path or str(
            Path(r"C:\ProgramData\Ableton\Live 12 Suite\Resources\MIDI Remote Scripts\libreria\reggaeton")
        )
        self._embedding_engine = None

    def _get_embedding_engine(self):
        if self._embedding_engine is None:
            try:
                from .embedding_engine import EmbeddingEngine
                self._embedding_engine = EmbeddingEngine()
            except ImportError:
                self._embedding_engine = None
        return self._embedding_engine

    def find_and_replace_sample(self, current_sample_path: str,
                                similarity_threshold: float = 0.7) -> Dict[str, Any]:
        """
        T051: Finds a similar alternative to the current sample.

        Args:
            current_sample_path: Path to the current sample
            similarity_threshold: Minimum similarity score (0.0-1.0)

        Returns:
            Dict with the alternatives found
        """
        engine = self._get_embedding_engine()
        if engine is None:
            return self._fallback_find_similar(current_sample_path, similarity_threshold)

        try:
            similar = engine.find_similar(current_sample_path, top_n=10)
            candidates = [s for s in similar if s.get('similarity', 0) >= similarity_threshold]
            return {
                "original_sample": current_sample_path,
                "alternatives": candidates[:5],
                "threshold_used": similarity_threshold,
                "matches_found": len(candidates),
            }
        except Exception:
            return self._fallback_find_similar(current_sample_path, similarity_threshold)

    def _fallback_find_similar(self, sample_path: str, threshold: float) -> Dict[str, Any]:
        sample_dir = Path(sample_path).parent
        sample_name = Path(sample_path).stem.lower()
        alternatives = []

        if sample_dir.exists():
            for f in sample_dir.glob("*.wav"):
                if f.name.lower() != Path(sample_path).name.lower():
                    words1 = set(sample_name.split('_'))
                    words2 = set(f.stem.lower().split('_'))
                    if words1 & words2:
                        sim = len(words1 & words2) / len(words1 | words2)
                        if sim >= threshold:
                            alternatives.append({
                                "path": str(f),
                                "name": f.name,
                                "similarity": round(sim, 2),
                            })

        return {
            "original_sample": sample_path,
            "alternatives": alternatives[:5],
            "threshold_used": threshold,
            "matches_found": len(alternatives),
            "method": "fallback_name_matching",
        }

    def layer_samples(self, track_index: int, sample_paths: List[str],
                      volumes: Optional[List[float]] = None) -> Dict[str, Any]:
        """
        T052: Creates a layer of 2+ samples.

        Args:
            track_index: Track where the samples will be placed
            sample_paths: List of sample paths
            volumes: Volume for each sample (0.0-1.0)

        Returns:
            Dict with the layer configuration
        """
        valid = [p for p in sample_paths if os.path.exists(p)]
        if len(valid) < 2:
            return {"error": "At least 2 valid samples are required to build a layer"}

        if volumes is None:
            volumes = [1.0 / len(valid)] * len(valid)

        total = sum(volumes)
        if total > 1.0:
            volumes = [v / total for v in volumes]

        layers = []
        for i, (path, vol) in enumerate(zip(valid, volumes)):
            layers.append({
                "sample_path": path,
                "sample_name": Path(path).name,
                "volume": round(vol, 3),
                "track_position": i,
                "pan": 0.0 if i == 0 else random.choice([-0.3, 0.3]),
            })

        return {
            "track_index": track_index,
            "num_layers": len(layers),
            "layers": layers,
            "total_volume": round(sum(l['volume'] for l in layers), 3),
            "layering_strategy": "equal_blend" if len(set(volumes)) == 1 else "weighted_blend",
        }

    def create_sample_chain(self, sample_sequence: List[str],
                            transition_duration: float = 1.0) -> Dict[str, Any]:
        """
        T053: Chains multiple samples in sequence.

        Args:
            sample_sequence: Ordered list of samples
            transition_duration: Transition duration in bars

        Returns:
            Dict with the configured sample chain
        """
        valid = [p for p in sample_sequence if os.path.exists(p)]
        if not valid:
            return {"error": "Empty sequence"}

        chain = []
        current_pos = 0.0

        for i, path in enumerate(valid):
            chain.append({
                "sample_path": path,
                "sample_name": Path(path).name,
                "start_bar": current_pos,
                "duration_bars": 4.0,
                "transition_in": transition_duration if i > 0 else 0.0,
                "transition_out": transition_duration if i < len(valid) - 1 else 0.0,
            })
            current_pos += 4.0

        return {
            "chain": chain,
            "total_samples": len(chain),
            "total_duration_bars": current_pos,
            "transition_duration": transition_duration,
            "chain_type": "sequential",
        }

    def generate_from_sample(self, seed_sample_path: str,
                             style: str = "inspired") -> Dict[str, Any]:
        """
        T054: Generates a song/idea based on a seed sample.

        Args:
            seed_sample_path: Path to the inspiration sample
            style: Generation style ('inspired', 'similar', 'remix')

        Returns:
            Dict with the generated song configuration
        """
        if not os.path.exists(seed_sample_path):
            return {"error": f"Sample not found: {seed_sample_path}"}

        engine = self._get_embedding_engine()
        features = engine.analyzer.get_features(seed_sample_path) if engine and hasattr(engine, 'analyzer') else {}
        similar = engine.find_similar(seed_sample_path, top_n=10) if engine else []

        bpm = features.get('bpm', 95)
        key = features.get('key', 'Am')

        structures = {
            "inspired": ["intro", "build", "drop", "break", "drop", "outro"],
            "similar": ["intro", "verse", "build", "drop", "break", "drop", "outro"],
            "remix": ["intro_seed", "build", "drop_seed_mix", "break", "drop_remix", "outro_seed"],
        }

        return {
            "seed_sample": seed_sample_path,
            "style": style,
            "extracted_features": features,
            "suggested_bpm": bpm,
            "suggested_key": key,
            "structure": structures.get(style, structures["inspired"]),
            "similar_samples_for_arrangement": similar[:5],
            "recommended_tracks": self._suggest_tracks_for_style(style),
        }

    def _suggest_tracks_for_style(self, style: str) -> List[Dict[str, Any]]:
        base = [
            {"role": "kick", "type": "drum", "priority": "high"},
            {"role": "snare", "type": "drum", "priority": "high"},
            {"role": "hats", "type": "drum", "priority": "medium"},
            {"role": "bass", "type": "bass", "priority": "high"},
        ]

        if style == "inspired":
            base.extend([
                {"role": "melody", "type": "synth", "priority": "medium"},
                {"role": "pad", "type": "synth", "priority": "low"},
            ])
        elif style == "similar":
            base.extend([
                {"role": "lead", "type": "synth", "priority": "high"},
                {"role": "arp", "type": "synth", "priority": "medium"},
                {"role": "fx", "type": "fx", "priority": "low"},
            ])
        elif style == "remix":
            base.extend([
                {"role": "seed_chops", "type": "sampler", "priority": "high"},
                {"role": "stutter_fx", "type": "fx", "priority": "medium"},
                {"role": "vocal_chops", "type": "sampler", "priority": "medium"},
            ])

        return base

    def create_vocal_chops(self, vocal_sample_path: str,
                           num_chops: int = 8) -> Dict[str, Any]:
        """
        T055: Creates vocal chops and maps them to a Drum Rack.

        Args:
            vocal_sample_path: Path to the vocal sample
            num_chops: Number of chops to create

        Returns:
            Dict with the generated chops and their pad mapping
        """
        if not os.path.exists(vocal_sample_path):
            return {"error": f"Vocal sample not found: {vocal_sample_path}"}

        positions = [i / num_chops + random.uniform(-0.05, 0.05) for i in range(num_chops)]

        chops = []
        for i, pos in enumerate(positions):
            chops.append({
                "chop_index": i,
                "pad_note": 36 + i,
                "start_position": pos,
                "duration": 0.5,
                "transient_strength": random.uniform(0.5, 1.0),
            })

        pattern = []
        for i in range(8):
            pattern.append({
                "note": 36 + (i % num_chops),
                "start_time": i * 0.5,
                "velocity": 100 if i % 4 == 0 else 80,
            })

        return {
            "source_sample": vocal_sample_path,
            "num_chops": len(chops),
            "chops": chops,
            "drum_rack_mapping": {
                "base_note": 36,
                "note_range": f"36-{36 + len(chops) - 1}",
            },
            "suggested_pattern": pattern,
        }
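

# Hypothetical usage sketch for the sample helpers. Real .wav files are
# expected in practice; here two empty placeholder files stand in so the
# call succeeds end to end.
def _example_sample_layering() -> None:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        paths = []
        for name in ("kick_punchy.wav", "kick_sub.wav"):
            p = os.path.join(tmp, name)
            with open(p, "wb"):
                pass
            paths.append(p)

        intelligence = SampleIntelligence(library_path=tmp)
        layered = intelligence.layer_samples(track_index=0, sample_paths=paths)
        print(layered["num_layers"], layered["layering_strategy"])  # 2 equal_blend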


# =============================================================================
# PART 4 - Reference and Comparison (T056-T060)
# =============================================================================

class ReferenceMatcher:
    """
    Compares projects against professional references and adapts them.

    Methods:
    - T056: match_reference_energy() - Adjusts energy
    - T057: match_reference_spectrum() - Adjusts EQ
    - T058: match_reference_width() - Adjusts stereo width
    - T059: generate_similarity_report() - Per-dimension score
    - T060: adapt_to_reference_style() - Adapts structure and instrumentation
    """

    def match_reference_energy(self, project_tracks: List[Dict[str, Any]],
                               reference_energy_curve: EnergyCurve) -> Dict[str, Any]:
        """
        T056: Adjusts the project's energy to match the reference.

        Args:
            project_tracks: Tracks of the current project
            reference_energy_curve: Reference energy curve

        Returns:
            Dict with the suggested adjustments
        """
        current = self._analyze_project_energy(project_tracks)
        adjustments = []

        for i, (bar, target) in enumerate(zip(reference_energy_curve.bars,
                                              reference_energy_curve.levels)):
            cur = current.get_level_at(bar)
            diff = target - cur

            if abs(diff) > 0.1:
                adjustments.append({
                    "bar": bar,
                    "section": reference_energy_curve.section_names[i] if i < len(reference_energy_curve.section_names) else "unknown",
                    "target_energy": round(target, 2),
                    "current_energy": round(cur, 2),
                    "adjustment": round(diff, 2),
                    "suggestion": self._energy_suggestion(diff),
                })

        return {
            "reference_curve": reference_energy_curve.to_dict(),
            "current_curve": current.to_dict(),
            "adjustments_needed": len(adjustments),
            "adjustments": adjustments,
            "overall_match_score": self._curve_similarity(current, reference_energy_curve),
        }

    def _analyze_project_energy(self, tracks: List[Dict[str, Any]]) -> EnergyCurve:
        bars, levels = [], []

        for bar in range(0, 64, 4):
            # Per-track energy = normalized average velocity weighted by note density.
            energy = 0.0
            for t in tracks:
                window = [n for n in t.get('notes', []) if bar <= n.get('start_time', 0) < bar + 4]
                avg_velocity = np.mean([n.get('velocity', 100) for n in window] or [0]) / 127.0
                density = min(1.0, len(window) / 16)
                energy += avg_velocity * density
            energy /= max(len(tracks), 1)
            bars.append(bar)
            levels.append(min(1.0, energy))

        return EnergyCurve(bars=bars, levels=levels)

    def _energy_suggestion(self, diff: float) -> str:
        if diff > 0.3:
            return "Add drum layers and raise the overall volume"
        elif diff > 0.15:
            return "Add percussive elements or raise the drums' volume"
        elif diff > 0:
            return "Slightly raise the volume of the main elements"
        elif diff < -0.3:
            return "Drastically reduce track density"
        elif diff < -0.15:
            return "Lower the volume of pads/synths"
        return "Fine-tune the balance"

    def _curve_similarity(self, c1: EnergyCurve, c2: EnergyCurve) -> float:
        min_len = min(len(c1.levels), len(c2.levels))
        if min_len < 2:
            return 0.5
        corr = np.corrcoef(np.array(c1.levels[:min_len]), np.array(c2.levels[:min_len]))[0, 1]
        return round((corr + 1) / 2, 3) if not np.isnan(corr) else 0.5

    def match_reference_spectrum(self, project_eq: Dict[str, Any],
                                 reference_spectrum: SpectrumProfile) -> Dict[str, Any]:
        """
        T057: Compares and adjusts EQ to match the reference.

        Args:
            project_eq: Current project EQ
            reference_spectrum: Reference spectral profile

        Returns:
            Dict with EQ recommendations
        """
        current = project_eq.get('bands', {})
        bands = [
            ('low', reference_spectrum.low_energy, current.get('low', 0.5)),
            ('low_mid', reference_spectrum.low_mid_energy, current.get('low_mid', 0.5)),
            ('mid', reference_spectrum.mid_energy, current.get('mid', 0.5)),
            ('high_mid', reference_spectrum.high_mid_energy, current.get('high_mid', 0.5)),
            ('high', reference_spectrum.high_energy, current.get('high', 0.5)),
        ]

        eq_adj = []
        for name, target, cur in bands:
            diff = target - cur
            if abs(diff) > 0.05:
                eq_adj.append({
                    "band": name,
                    "target_db": round(target * 12 - 6, 1),
                    "current_db": round(cur * 12 - 6, 1),
                    "adjustment_db": round(diff * 12, 1),
                    "action": "boost" if diff > 0 else "cut",
                })

        distance = np.linalg.norm(np.array([b[1] for b in bands]) - np.array([b[2] for b in bands]))

        return {
            "reference_spectrum": reference_spectrum.to_dict(),
            "current_eq": project_eq,
            "eq_adjustments": eq_adj,
            "spectrum_match_score": round(max(0, 1 - distance / 2), 3),
            "needs_eq_work": len(eq_adj) > 2,
        }

    def match_reference_width(self, project_stereo: Dict[str, Any],
                              reference_width: StereoWidth) -> Dict[str, Any]:
        """
        T058: Compares and adjusts stereo width to match the reference.

        Args:
            project_stereo: Current stereo width of the project
            reference_width: Reference stereo width

        Returns:
            Dict with stereo-width recommendations
        """
        current = StereoWidth(
            low=project_stereo.get('low', 0.1),
            mid_low=project_stereo.get('mid_low', 0.3),
            mid=project_stereo.get('mid', 0.5),
            high=project_stereo.get('high', 0.7),
        )

        comps = [
            ("low", current.low, reference_width.low, 0.2),
            ("mid_low", current.mid_low, reference_width.mid_low, 0.4),
            ("mid", current.mid, reference_width.mid, 0.5),
            ("high", current.high, reference_width.high, 0.6),
        ]

        width_adj = []
        for band, cur, ref, tol in comps:
            diff = cur - ref
            if abs(diff) > tol:
                width_adj.append({
                    "band": band,
                    "current_width": round(cur, 2),
                    "reference_width": round(ref, 2),
                    "difference": round(diff, 2),
                    "action": "narrow" if diff > 0 else "widen",
                    "suggestion": self._width_suggestion(band, diff),
                })

        match_score = max(0, 1 - np.mean([abs(c[1] - c[2]) for c in comps]))

        return {
            "reference_width": reference_width.to_dict(),
            "current_width": current.to_dict(),
            "width_adjustments": width_adj,
            "width_match_score": round(match_score, 3),
            "is_balanced": current.is_balanced(),
        }

    def _width_suggestion(self, band: str, diff: float) -> str:
        if band == "low":
            return "Use Utility or EQ to keep the low frequencies mono" if diff > 0 else "More mono in the lows improves punch"
        elif band == "high":
            return "Add chorus or a short delay to widen the highs" if diff < 0 else "Narrow it to avoid losing focus"
        return "Consider wider panning in the mid range" if diff < 0 else "Narrow it for better cohesion"

    def generate_similarity_report(self, project: Dict[str, Any],
                                   reference: Dict[str, Any]) -> Dict[str, Any]:
        """
        T059: Generates a detailed per-dimension similarity report.

        Args:
            project: Data of the current project
            reference: Data of the reference

        Returns:
            Dict with the SimilarityScore broken down by dimension
        """
        scores = SimilarityScore()

        bpm_diff = abs(project.get('tempo', 120) - reference.get('tempo', 120))
        scores.bpm_score = max(0, 1 - (bpm_diff / 30))

        p_key, r_key = project.get('key', ''), reference.get('key', '')
        scores.key_score = 1.0 if p_key == r_key else (0.5 if p_key and r_key and p_key[0] == r_key[0] else 0.0)

        p_energy, r_energy = project.get('energy_curve', {}), reference.get('energy_curve', {})
        if p_energy and r_energy:
            p_l, r_l = p_energy.get('levels', []), r_energy.get('levels', [])
            if p_l and r_l:
                min_len = min(len(p_l), len(r_l))
                corr = np.corrcoef(p_l[:min_len], r_l[:min_len])[0, 1]
                scores.energy_score = (corr + 1) / 2 if not np.isnan(corr) else 0.5

        p_spec, r_spec = project.get('spectrum', {}), reference.get('spectrum', {})
        if p_spec and r_spec:
            distance = np.linalg.norm(
                np.array([p_spec.get(k, 0) for k in ['low', 'mid', 'high']]) -
                np.array([r_spec.get(k, 0) for k in ['low', 'mid', 'high']])
            )
            scores.spectrum_score = max(0, 1 - distance / 3)

        p_width, r_width = project.get('stereo_width', {}), reference.get('stereo_width', {})
        if p_width and r_width:
            diffs = [abs(p_width.get(k, 0) - r_width.get(k, 0)) for k in ['low', 'mid', 'high']]
            scores.width_score = max(0, 1 - np.mean(diffs))

        total = scores.total
        interpretation = (
            "Very similar" if total >= 0.85 else
            "Similar" if total >= 0.70 else
            "Moderately similar" if total >= 0.55 else
            "Not very similar" if total >= 0.40 else
            "Different"
        )

        return {
            "similarity_scores": scores.to_dict(),
            "total_similarity": total,
            "interpretation": interpretation,
            "dimension_analysis": {
                "bpm": {"project": project.get('tempo', 0), "reference": reference.get('tempo', 0), "score": scores.bpm_score},
                "key": {"project": p_key, "reference": r_key, "score": scores.key_score},
                "energy": {"score": scores.energy_score},
                "spectrum": {"score": scores.spectrum_score},
                "width": {"score": scores.width_score},
            },
        }

    def adapt_to_reference_style(self, project: Dict[str, Any],
                                 reference_style: str) -> Dict[str, Any]:
        """
        T060: Adapts structure and instrumentation to the reference style.

        Args:
            project: Project to adapt
            reference_style: Reference style ('pop', 'edm', 'hiphop', 'reggaeton')

        Returns:
            Dict with the suggested adaptations
        """
        profiles = {
            'reggaeton': {
                'structure': ['intro', 'verse', 'build', 'drop', 'break', 'drop', 'outro'],
                'bpm_range': (85, 105),
                'key_type': 'minor',
                'instruments': ['kick', 'snare', 'dembow_hats', 'bass', 'synth_lead'],
                'width': 'narrow_low_wide_high',
            },
            'pop': {
                'structure': ['intro', 'verse', 'prechorus', 'chorus', 'verse', 'chorus', 'bridge', 'chorus', 'outro'],
                'bpm_range': (90, 130),
                'key_type': 'major',
                'instruments': ['kick', 'snare', 'hats', 'bass', 'pad', 'lead_vocal'],
                'width': 'balanced',
            },
            'edm': {
                'structure': ['intro', 'build', 'drop', 'break', 'build', 'drop', 'outro'],
                'bpm_range': (120, 140),
                'key_type': 'minor',
                'instruments': ['kick', 'snare', 'hats', 'sub_bass', 'synth_lead', 'fx'],
                'width': 'wide',
            },
            'hiphop': {
                'structure': ['intro', 'verse', 'hook', 'verse', 'hook', 'bridge', 'hook', 'outro'],
                'bpm_range': (70, 100),
                'key_type': 'minor',
                'instruments': ['kick', 'snare', 'hats', '808_bass', 'sample', 'vocal'],
                'width': 'centered',
            },
        }

        profile = profiles.get(reference_style.lower(), profiles['reggaeton'])
        current_tracks = project.get('tracks', [])
        current_bpm = project.get('tempo', 120)
        current_roles = {t.get('role', 'unknown') for t in current_tracks}

        changes = [
            {"action": "add", "instrument": i, "reason": "Characteristic of the style"}
            for i in profile['instruments'] if i not in current_roles
        ]
        changes.extend([
            {"action": "consider_remove", "instrument": r, "reason": "Not typical of the style"}
            for r in current_roles if r not in profile['instruments']
        ])

        priorities = []
        if not (profile['bpm_range'][0] <= current_bpm <= profile['bpm_range'][1]):
            priorities.append("adjust_bpm")
        if len(project.get('structure', [])) < len(profile['structure']):
            priorities.append("extend_structure")
        if [i for i in profile['instruments'] if i not in current_roles]:
            priorities.append("add_missing_instruments")
        if not priorities:
            priorities.append("fine_tune_mix")

        return {
            "target_style": reference_style,
            "current_structure": project.get('structure', []),
            "suggested_structure": profile['structure'],
            "bpm_adjustment": {
                "current": current_bpm,
                "target_range": profile['bpm_range'],
                "suggested": sum(profile['bpm_range']) // 2,
            },
            "instrumentation_changes": changes,
            "stereo_width_target": profile['width'],
            "adaptation_priority": priorities,
        }
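

# Hedged sketch of a similarity report between two toy project descriptions.
# The dictionary keys mirror what generate_similarity_report() reads above;
# the values are invented purely for illustration.
def _example_similarity_report() -> None:
    matcher = ReferenceMatcher()
    project = {
        "tempo": 96, "key": "Am",
        "energy_curve": {"levels": [0.3, 0.6, 1.0, 0.4]},
        "spectrum": {"low": 0.3, "mid": 0.4, "high": 0.3},
        "stereo_width": {"low": 0.1, "mid": 0.5, "high": 0.7},
    }
    reference = {
        "tempo": 92, "key": "Am",
        "energy_curve": {"levels": [0.3, 0.7, 1.0, 0.3]},
        "spectrum": {"low": 0.35, "mid": 0.35, "high": 0.3},
        "stereo_width": {"low": 0.1, "mid": 0.6, "high": 0.8},
    }
    report = matcher.generate_similarity_report(project, reference)
    print(report["total_similarity"], report["interpretation"])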


# =============================================================================
# CONVENIENCE FUNCTIONS
# =============================================================================

def analyze_project_key(tracks: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Convenience function for analyzing a project's key."""
    analyzer = ProjectAnalyzer()
    return analyzer.analyze_project_key(tracks)


def harmonize_track(track_index: int, chord_progression: List[str]) -> Dict[str, Any]:
    """Convenience function for harmonizing a track."""
    analyzer = ProjectAnalyzer()
    return analyzer.harmonize_track(track_index, chord_progression)


def generate_counter_melody(main_melody_track: Dict[str, Any],
                            harmony_level: str = "thirds") -> Dict[str, Any]:
    """Convenience function for generating a counter-melody."""
    generator = CounterMelodyGenerator()
    return generator.generate_counter_melody(main_melody_track, harmony_level)


def variate_loop(loop_clips: List[Dict[str, Any]],
                 variation_intensity: float = 0.5) -> List[Dict[str, Any]]:
    """Convenience function for varying a loop."""
    engine = VariationEngine()
    return engine.variate_loop(loop_clips, variation_intensity)


def create_vocal_chops(vocal_sample_path: str, num_chops: int = 8) -> Dict[str, Any]:
    """Convenience function for creating vocal chops."""
    intelligence = SampleIntelligence()
    return intelligence.create_vocal_chops(vocal_sample_path, num_chops)


# =============================================================================
# EXPORTS
# =============================================================================

__all__ = [
    # Dataclasses
    "EnergyCurve",
    "SpectrumProfile",
    "StereoWidth",
    "SimilarityScore",
    # Main classes - Part 1 (T041-T045)
    "ProjectAnalyzer",
    "CounterMelodyGenerator",
    # Main classes - Part 2 (T046-T050)
    "VariationEngine",
    # Main classes - Part 3 (T051-T055)
    "SampleIntelligence",
    # Main classes - Part 4 (T056-T060)
    "ReferenceMatcher",
    # Convenience functions
    "analyze_project_key",
    "harmonize_track",
    "generate_counter_melody",
    "variate_loop",
    "create_vocal_chops",
]
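

# Optional smoke run for the illustrative example sketches above. All inputs
# are synthetic, so running this file directly only prints a few results and
# exercises the hypothetical _example_* helpers; it is safe to remove.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    _example_energy_curve_usage()
    _example_similarity_total()
    _example_project_analysis()
    _example_counter_melody()
    _example_loop_variation()
    _example_sample_layering()
    _example_similarity_report()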