Compare commits
3 Commits
3f3866f32e
...
379aeb4227
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
379aeb4227 | ||
|
|
c6a1705026 | ||
|
|
dac7ec2a5a |
@@ -1559,6 +1559,96 @@ class _AbletonMCP(ControlSurface):
|
||||
raise Exception("Failed to load sample: %s" % str(e))
|
||||
return {"loaded": False}
|
||||
|
||||
def _cmd_duplicate_clip(self, source_track, source_clip, target_track, target_clip, **kw):
    """Duplicate/clone a clip from one slot to another in Session View.

    Args:
        source_track: Source track index.
        source_clip: Source clip slot index.
        target_track: Target track index (can be same as source).
        target_clip: Target clip slot index.

    Returns:
        dict with a "duplicated" flag plus "type" ("midi"/"audio") and the
        nested creation result on success, or an "error" message on failure.
    """
    try:
        src_track_idx = int(source_track)
        src_clip_idx = int(source_clip)
        tgt_track_idx = int(target_track)
        tgt_clip_idx = int(target_clip)

        src_track = self._song.tracks[src_track_idx]
        src_slot = src_track.clip_slots[src_clip_idx]

        if not src_slot.has_clip:
            return {"duplicated": False, "error": "Source slot has no clip"}

        src_clip = src_slot.clip
        tgt_track = self._song.tracks[tgt_track_idx]
        tgt_slot = tgt_track.clip_slots[tgt_clip_idx]

        # Clear target if occupied
        if tgt_slot.has_clip:
            tgt_slot.delete_clip()

        # Detect clip type: try MIDI first, fallback to audio.
        # Audio clips raise on get_notes, so a failure here means
        # "audio clip", not an error condition.
        is_midi = False
        notes = []
        try:
            # MIDI clips have get_notes with 4 required params
            clip_len = src_clip.length if hasattr(src_clip, "length") else 4.0
            notes = list(src_clip.get_notes(0, 0, clip_len, 128))
            is_midi = True
        except Exception:
            is_midi = False  # It's an audio clip

        if is_midi:
            # MIDI clip - copy notes. Note: an empty MIDI clip is still
            # MIDI; duplicate it as an empty clip instead of falling
            # through to the audio path (which would always fail).
            result = self._cmd_generate_midi_clip(
                tgt_track_idx, tgt_clip_idx,
                notes=[{
                    "pitch": n[0], "start_time": n[1],
                    "duration": n[2], "velocity": n[3], "mute": n[4]
                } for n in notes]
            )
            # Copy name
            if result.get("created") and tgt_slot.has_clip:
                tgt_slot.clip.name = src_clip.name + " (copy)"
            return {"duplicated": True, "type": "midi", "result": result}

        # Audio clip - copy file reference and properties.
        # Get the file path from the source; try multiple methods since
        # the attribute location varies between Live API versions.
        file_path = None
        if hasattr(src_clip, "file_path"):
            file_path = src_clip.file_path
        elif hasattr(src_clip, "sample"):
            sample = src_clip.sample
            if hasattr(sample, "file_path"):
                file_path = sample.file_path

        # Alternative: try to get from audio clip properties
        if not file_path and hasattr(src_clip, "external_device"):
            ext = src_clip.external_device
            if hasattr(ext, "file_path"):
                file_path = ext.file_path

        if not file_path:
            return {"duplicated": False, "error": "Could not get audio file path from source clip"}

        result = self._cmd_load_sample_to_clip(
            tgt_track_idx, tgt_clip_idx, file_path
        )
        # Copy name and warp settings
        if result.get("loaded") and tgt_slot.has_clip:
            tgt_slot.clip.name = src_clip.name + " (copy)"
            if hasattr(src_clip, "warping") and hasattr(tgt_slot.clip, "warping"):
                tgt_slot.clip.warping = src_clip.warping
        return {"duplicated": True, "type": "audio", "result": result}

    except Exception as e:
        self.log_message("Error duplicating clip: %s" % str(e))
        return {"duplicated": False, "error": str(e)}
|
||||
|
||||
def _cmd_load_sample_to_drum_rack_pad(self, track_index, pad_note, sample_path, **kw):
|
||||
"""T012: Load a sample into a specific Drum Rack pad (MIDI note)."""
|
||||
import os
|
||||
|
||||
@@ -78,6 +78,7 @@ TIMEOUTS = {
|
||||
"generate_complete_reggaeton": 60.0,
|
||||
"generate_from_reference": 60.0,
|
||||
"load_sample_to_clip": 15.0,
|
||||
"duplicate_clip": 15.0,
|
||||
"create_arrangement_audio_clip": 20.0,
|
||||
"set_warp_markers": 15.0,
|
||||
"reverse_clip": 10.0,
|
||||
@@ -191,6 +192,8 @@ TIMEOUTS = {
|
||||
"analyze_all_bpm": 600.0, # 10 minutes for analyzing 800+ samples
|
||||
"select_bpm_coherent_pool": 20.0,
|
||||
"warp_clip_to_bpm": 30.0,
|
||||
# Spectral Coherence Production
|
||||
"produce_with_spectral_coherence": 300.0,
|
||||
}
|
||||
|
||||
|
||||
@@ -1123,6 +1126,29 @@ def load_sample_to_clip(ctx: Context, track_index: int, clip_index: int, sample_
|
||||
return _ok(resp) if resp.get("status") == "success" else _err(resp.get("message"))
|
||||
|
||||
|
||||
@mcp.tool()
def duplicate_clip(ctx: Context, source_track: int, source_clip: int,
                   target_track: int, target_clip: int) -> str:
    """Duplicate/clone a clip from one Session View slot to another.

    Args:
        source_track: Source track index
        source_clip: Source clip slot index
        target_track: Target track index (can be same as source)
        target_clip: Target clip slot index

    Returns:
        JSON with duplication status and clip info.
    """
    payload = {
        "source_track": source_track,
        "source_clip": source_clip,
        "target_track": target_track,
        "target_clip": target_clip,
    }
    resp = _send_to_ableton(
        "duplicate_clip",
        payload,
        timeout=TIMEOUTS["duplicate_clip"],
    )
    if resp.get("status") == "success":
        return _ok(resp)
    return _err(resp.get("message"))
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
def load_sample_to_drum_rack(ctx: Context, track_index: int, sample_path: str,
|
||||
pad_note: int = 36) -> str:
|
||||
@@ -4063,6 +4089,7 @@ def help(ctx: Context, tool_name: str = "") -> str:
|
||||
# Arrangement
|
||||
"create_arrangement_audio_pattern": {"description": "Crea clips de audio en Arrangement View", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "file_path", "type": "str"}, {"name": "positions", "type": "list", "default": [0]}, {"name": "name", "type": "str", "optional": True}], "example": "create_arrangement_audio_pattern(track_index=0, file_path='...', positions=[0, 4, 8])"},
|
||||
"load_sample_to_clip": {"description": "Carga sample en clip de Session View", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "sample_path", "type": "str"}], "example": "load_sample_to_clip(track_index=0, clip_index=0, sample_path='...')"},
|
||||
"duplicate_clip": {"description": "Duplica un clip a otro slot de Session View", "category": "Arrangement", "params": [{"name": "source_track", "type": "int"}, {"name": "source_clip", "type": "int"}, {"name": "target_track", "type": "int"}, {"name": "target_clip", "type": "int"}], "example": "duplicate_clip(source_track=0, source_clip=0, target_track=0, target_clip=1)"},
|
||||
"load_sample_to_drum_rack": {"description": "Carga sample en pad de Drum Rack", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "sample_path", "type": "str"}, {"name": "pad_note", "type": "int", "default": 36}], "example": "load_sample_to_drum_rack(track_index=0, sample_path='...', pad_note=36)"},
|
||||
"set_warp_markers": {"description": "Configura marcadores de warp", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}, {"name": "markers", "type": "list"}], "example": "set_warp_markers(track_index=0, clip_index=0, markers=[...])"},
|
||||
"reverse_clip": {"description": "Invierte un clip", "category": "Arrangement", "params": [{"name": "track_index", "type": "int"}, {"name": "clip_index", "type": "int"}], "example": "reverse_clip(track_index=0, clip_index=0)"},
|
||||
@@ -6889,6 +6916,372 @@ def get_production_progress(ctx: Context) -> str:
|
||||
return _err(f"Error getting production progress: {str(e)}")
|
||||
|
||||
|
||||
@mcp.tool()
|
||||
def produce_with_spectral_coherence(ctx: Context,
|
||||
bpm: int = 100,
|
||||
key: str = "Am",
|
||||
style: str = "standard",
|
||||
coherence_threshold: float = 0.90,
|
||||
max_samples_per_role: int = 12,
|
||||
auto_record: bool = True) -> str:
|
||||
"""
|
||||
Genera una cancion profesional con seleccion espectral coherente.
|
||||
|
||||
Usa los 511 samples analizados para crear una produccion donde TODOS
|
||||
los samples son espectralmente coherentes (mismo timbre, energia compatible).
|
||||
|
||||
Args:
|
||||
bpm: Tempo del proyecto (default 100)
|
||||
key: Tonalidad (default Am)
|
||||
style: Estilo de produccion (standard, minimal, trap, perreo)
|
||||
coherence_threshold: Minimo score de coherencia (0.0-1.0, default 0.90 profesional)
|
||||
max_samples_per_role: Cuantos samples usar por rol (default 12)
|
||||
auto_record: Grabar a Arrangement View automaticamente
|
||||
|
||||
Returns:
|
||||
JSON con detalles de la produccion, coherencia por rol, y samples usados.
|
||||
"""
|
||||
try:
|
||||
# PRUEBA SIMPLE - Crear un solo track
|
||||
logger.info("[SPECTRAL] PRUEBA: Creando track simple...")
|
||||
|
||||
track_result = _send_to_ableton("create_audio_track", {"index": -1}, timeout=30.0)
|
||||
logger.info(f"[SPECTRAL] Track result: {track_result}")
|
||||
|
||||
if track_result.get("status") != "success":
|
||||
return _err(f"Error creando track: {track_result.get('message')}")
|
||||
|
||||
# Debug: ver estructura completa
|
||||
logger.info(f"[SPECTRAL] track_result type: {type(track_result)}")
|
||||
logger.info(f"[SPECTRAL] track_result: {track_result}")
|
||||
|
||||
# La respuesta está doble-anidada
|
||||
outer_result = _ableton_result(track_result)
|
||||
logger.info(f"[SPECTRAL] outer_result type: {type(outer_result)}")
|
||||
logger.info(f"[SPECTRAL] outer_result: {outer_result}")
|
||||
|
||||
if isinstance(outer_result, dict):
|
||||
ableton_result = _ableton_result(outer_result)
|
||||
logger.info(f"[SPECTRAL] ableton_result type: {type(ableton_result)}")
|
||||
logger.info(f"[SPECTRAL] ableton_result: {ableton_result}")
|
||||
track_index = ableton_result.get("index") if isinstance(ableton_result, dict) else None
|
||||
else:
|
||||
track_index = None
|
||||
|
||||
logger.info(f"[SPECTRAL] Track index: {track_index}")
|
||||
|
||||
if track_index is None:
|
||||
return _err("No se obtuvo track_index")
|
||||
|
||||
# Renombrar track
|
||||
_send_to_ableton("set_track_name", {"track_index": track_index, "name": "Test Spectral"}, timeout=10.0)
|
||||
|
||||
return _ok({
|
||||
"status": "success",
|
||||
"message": "Track de prueba creado",
|
||||
"track_index": track_index,
|
||||
"ableton_result": ableton_result
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
logger.error(f"[SPECTRAL] Error: {str(e)}")
|
||||
logger.error(f"[SPECTRAL] Traceback: {traceback.format_exc()}")
|
||||
return _err(f"Error: {str(e)}")
|
||||
|
||||
# Conectar a base de datos con features espectrales
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
logger.info("[SPECTRAL] DB conectada")
|
||||
|
||||
# Verificar que hay datos
|
||||
cursor.execute("SELECT COUNT(*) FROM samples")
|
||||
total_samples = cursor.fetchone()[0]
|
||||
logger.info(f"[SPECTRAL] {total_samples} samples en DB")
|
||||
|
||||
if total_samples == 0:
|
||||
return _err("Database vacia. Ejecutar analisis de libreria primero.")
|
||||
|
||||
logger.info(f"[SPECTRAL] {total_samples} samples disponibles en base de datos")
|
||||
|
||||
# Mapeo de roles a categorias
|
||||
ROLE_CATEGORIES = {
|
||||
"kick": ["kick", "kicks", "8. KICKS", "kicks"],
|
||||
"snare": ["snare", "snares", "9. SNARE", "snares"],
|
||||
"hihat": ["hi-hat", "hi_hat", "hihats", "hat", "hats"],
|
||||
"perc": ["perc", "percs", "perc loop", "10. PERCS", "PERC"],
|
||||
"bass": ["bass", "basses", "Bass", "BASS", "reese"],
|
||||
"drumloop": ["drumloop", "drumloops", "4. DRUM LOOPS", "LATINOS - DRUM LOOPS"],
|
||||
"oneshot": ["oneshot", "oneshots", "3. ONE SHOTS", "LATINOS - ONE SHOTS", "20 One Shots"],
|
||||
"fx": ["fx", "FX", "5. FX", "transicion"],
|
||||
"vocal": ["vocal", "vocals", "11. VOCALS", "20 Vocals Phrases"],
|
||||
"pad": ["pad", "pads", "PAD"],
|
||||
"lead": ["lead", "leads", "LEAD"]
|
||||
}
|
||||
|
||||
def get_samples_for_role(role, min_coherence=0.85):
|
||||
"""Selecciona samples coherentes para un rol."""
|
||||
try:
|
||||
categories = ROLE_CATEGORIES.get(role, [role])
|
||||
|
||||
# Buscar samples de las categorias del rol
|
||||
samples = []
|
||||
for cat in categories:
|
||||
cursor.execute("""
|
||||
SELECT s.path, s.bpm, s.key, s.duration, s.rms,
|
||||
s.spectral_centroid, s.spectral_rolloff, s.zero_crossing_rate,
|
||||
s.mfcc_1, s.mfcc_2, s.mfcc_3, s.mfcc_4, s.mfcc_5,
|
||||
s.mfcc_6, s.mfcc_7, s.mfcc_8, s.mfcc_9, s.mfcc_10,
|
||||
s.mfcc_11, s.mfcc_12, s.mfcc_13,
|
||||
sb.embedding, sb.spectral_features, sc.category
|
||||
FROM samples s
|
||||
JOIN samples_bpm sb ON s.path = sb.path
|
||||
JOIN sample_categories sc ON s.path = sc.path
|
||||
WHERE sc.category LIKE ?
|
||||
AND s.duration > 0
|
||||
ORDER BY s.duration DESC
|
||||
""", (f"%{cat}%",))
|
||||
|
||||
for row in cursor.fetchall():
|
||||
samples.append({
|
||||
'path': row[0],
|
||||
'bpm': row[1] or bpm,
|
||||
'key': row[2] or key,
|
||||
'duration': row[3],
|
||||
'rms': row[4] or -20,
|
||||
'spectral_centroid': row[5] or 2000,
|
||||
'spectral_rolloff': row[6] or 4000,
|
||||
'zcr': row[7] or 0.1,
|
||||
'mfccs': list(row[8:21]),
|
||||
'embedding': row[21],
|
||||
'spectral_features': row[22]
|
||||
})
|
||||
|
||||
if len(samples) < 2:
|
||||
logger.warning(f"[SPECTRAL] Pocos samples para rol {role}: {len(samples)}")
|
||||
return samples[:max_samples_per_role]
|
||||
|
||||
# Calcular coherencia entre pares y seleccionar los mas coherentes
|
||||
selected = [samples[0]] # Empezar con el primero
|
||||
|
||||
for candidate in samples[1:]:
|
||||
if len(selected) >= max_samples_per_role:
|
||||
break
|
||||
|
||||
# Calcular coherencia promedio con los ya seleccionados
|
||||
coherence_scores = []
|
||||
for selected_sample in selected:
|
||||
score = calculate_coherence(candidate, selected_sample)
|
||||
coherence_scores.append(score)
|
||||
|
||||
avg_coherence = np.mean(coherence_scores) if coherence_scores else 0
|
||||
|
||||
if avg_coherence >= min_coherence:
|
||||
selected.append(candidate)
|
||||
logger.debug(f"[SPECTRAL] {role}: {candidate['path'][:30]}... coherencia={avg_coherence:.3f}")
|
||||
|
||||
logger.info(f"[SPECTRAL] Rol {role}: {len(selected)} samples seleccionados (coherencia >= {min_coherence})")
|
||||
return selected
|
||||
|
||||
except Exception as inner_err:
|
||||
logger.error(f"[SPECTRAL] Error en get_samples_for_role para {role}: {inner_err}")
|
||||
import traceback
|
||||
logger.error(f"[SPECTRAL] Traceback: {traceback.format_exc()}")
|
||||
return []
|
||||
|
||||
def calculate_coherence(s1, s2):
|
||||
"""Calcula coherencia entre dos samples usando features pre-calculadas."""
|
||||
scores = []
|
||||
|
||||
# 1. Similitud de timbre (MFCC) - 40%
|
||||
mfcc_sim = cosine_similarity(s1['mfccs'], s2['mfccs'])
|
||||
scores.append(mfcc_sim * 0.40)
|
||||
|
||||
# 2. Compatibilidad espectral - 30%
|
||||
centroid_diff = abs(s1['spectral_centroid'] - s2['spectral_centroid']) / max(s1['spectral_centroid'], 1)
|
||||
centroid_sim = max(0, 1 - centroid_diff)
|
||||
scores.append(centroid_sim * 0.30)
|
||||
|
||||
# 3. Balance de energia - 20%
|
||||
rms_diff = abs(s1['rms'] - s2['rms']) / 60 # Normalizar
|
||||
rms_sim = max(0, 1 - rms_diff)
|
||||
scores.append(rms_sim * 0.20)
|
||||
|
||||
# 4. ZCR compatibilidad - 10%
|
||||
zcr_sim = 1 - min(1, abs(s1['zcr'] - s2['zcr']) * 10)
|
||||
scores.append(zcr_sim * 0.10)
|
||||
|
||||
return sum(scores)
|
||||
|
||||
def cosine_similarity(v1, v2):
|
||||
"""Calcula similitud coseno entre dos vectores."""
|
||||
try:
|
||||
v1_arr = np.array(v1)
|
||||
v2_arr = np.array(v2)
|
||||
dot = np.dot(v1_arr, v2_arr)
|
||||
norm = np.linalg.norm(v1_arr) * np.linalg.norm(v2_arr)
|
||||
return float(dot / norm) if norm > 0 else 0.0
|
||||
except:
|
||||
return 0.0
|
||||
|
||||
# Seleccionar samples coherentes por rol
|
||||
logger.info("[SPECTRAL] Iniciando seleccion coherente...")
|
||||
|
||||
selected_kits = {}
|
||||
coherence_scores = {}
|
||||
|
||||
logger.info("[SPECTRAL] Procesando roles...")
|
||||
for role in ["kick", "snare", "hihat", "perc", "bass", "drumloop", "oneshot", "fx"]:
|
||||
samples = get_samples_for_role(role, min_coherence=coherence_threshold)
|
||||
selected_kits[role] = samples
|
||||
|
||||
# Calcular score promedio de coherencia para este rol
|
||||
if len(samples) >= 2:
|
||||
pairwise_scores = []
|
||||
for i in range(len(samples)):
|
||||
for j in range(i+1, len(samples)):
|
||||
score = calculate_coherence(samples[i], samples[j])
|
||||
pairwise_scores.append(score)
|
||||
avg_coherence = np.mean(pairwise_scores) if pairwise_scores else 0
|
||||
else:
|
||||
avg_coherence = 0.85 # Default si solo hay 1 sample
|
||||
|
||||
coherence_scores[role] = round(avg_coherence, 3)
|
||||
|
||||
# Reporte de coherencia
|
||||
overall_coherence = np.mean(list(coherence_scores.values()))
|
||||
logger.info(f"[SPECTRAL] Coherencia general: {overall_coherence:.3f}")
|
||||
logger.info(f"[SPECTRAL] selected_kits tiene {len(selected_kits)} roles")
|
||||
|
||||
# Ahora crear la produccion con los samples seleccionados
|
||||
tracks_created = []
|
||||
samples_loaded = []
|
||||
logger.info("[SPECTRAL] Iniciando creacion de tracks...")
|
||||
|
||||
# Crear tracks y cargar samples coherentes
|
||||
for role_idx, (role, samples) in enumerate(selected_kits.items()):
|
||||
try:
|
||||
if not samples:
|
||||
continue
|
||||
|
||||
# Crear track
|
||||
track_result = _send_to_ableton(
|
||||
"create_audio_track",
|
||||
{"index": -1},
|
||||
timeout=TIMEOUTS["create_audio_track"]
|
||||
)
|
||||
|
||||
if track_result.get("status") != "success":
|
||||
logger.warning(f"[SPECTRAL] Fallo crear track para {role}: {track_result}")
|
||||
continue
|
||||
|
||||
# Extraer resultado anidado de Ableton
|
||||
ableton_result = _ableton_result(track_result)
|
||||
track_index = ableton_result.get("index")
|
||||
|
||||
if track_index is None:
|
||||
logger.warning(f"[SPECTRAL] No se pudo obtener track_index para rol {role}, result: {ableton_result}")
|
||||
continue
|
||||
|
||||
# Renombrar track
|
||||
_send_to_ableton(
|
||||
"set_track_name",
|
||||
{"track_index": track_index, "name": f"{role.title()} Spectral"},
|
||||
timeout=10.0
|
||||
)
|
||||
|
||||
# Cargar samples coherentes en slots
|
||||
for slot_idx, sample in enumerate(samples[:8]): # Max 8 slots
|
||||
try:
|
||||
sample_path = os.path.join(LIBRARY_PATH, sample['path'])
|
||||
if os.path.exists(sample_path):
|
||||
load_result = _send_to_ableton(
|
||||
"load_sample_to_clip",
|
||||
{"track_index": track_index, "clip_index": slot_idx, "sample_path": sample_path},
|
||||
timeout=TIMEOUTS["load_sample_to_clip"]
|
||||
)
|
||||
if load_result.get("status") == "success":
|
||||
samples_loaded.append({
|
||||
"role": role,
|
||||
"track": track_index,
|
||||
"slot": slot_idx,
|
||||
"path": sample['path'],
|
||||
"bpm": sample['bpm'],
|
||||
"key": sample['key'],
|
||||
"duration": sample['duration']
|
||||
})
|
||||
except Exception as slot_err:
|
||||
logger.error(f"[SPECTRAL] Error cargando slot {slot_idx} para {role}: {slot_err}")
|
||||
continue
|
||||
|
||||
# Contar samples para este rol
|
||||
count = len([s for s in samples_loaded if s.get('role') == role])
|
||||
track_info = {"role": role, "track_index": track_index, "samples_count": count}
|
||||
tracks_created.append(track_info)
|
||||
logger.info(f"[SPECTRAL] Track creado para {role}: index={track_index}, samples={count}")
|
||||
|
||||
except Exception as role_err:
|
||||
logger.error(f"[SPECTRAL] Error procesando rol {role}: {role_err}")
|
||||
import traceback
|
||||
logger.error(f"[SPECTRAL] Traceback: {traceback.format_exc()}")
|
||||
continue
|
||||
|
||||
conn.close()
|
||||
|
||||
# Disparar clips para escuchar
|
||||
logger.info(f"[SPECTRAL] tracks_created: {len(tracks_created)} tracks")
|
||||
for i, track_info in enumerate(tracks_created):
|
||||
logger.info(f"[SPECTRAL] Track {i}: {type(track_info)} - {track_info}")
|
||||
|
||||
try:
|
||||
for idx, track_info in enumerate(tracks_created):
|
||||
logger.info(f"[SPECTRAL] Procesando track {idx}: {type(track_info)}")
|
||||
if not isinstance(track_info, dict):
|
||||
logger.warning(f"[SPECTRAL] track_info no es dict: {type(track_info)}")
|
||||
continue
|
||||
logger.info(f"[SPECTRAL] Keys: {list(track_info.keys())}")
|
||||
if 'track_index' not in track_info:
|
||||
logger.warning(f"[SPECTRAL] track_info sin track_index: {track_info}")
|
||||
continue
|
||||
if track_info.get('samples_count', 0) > 0:
|
||||
ti = track_info['track_index']
|
||||
_send_to_ableton(
|
||||
"fire_clip",
|
||||
{"track_index": ti, "clip_index": 0},
|
||||
timeout=10.0
|
||||
)
|
||||
except Exception as fire_err:
|
||||
logger.error(f"[SPECTRAL] Error en fire_clip loop: {fire_err}")
|
||||
import traceback
|
||||
logger.error(f"[SPECTRAL] Traceback: {traceback.format_exc()}")
|
||||
|
||||
# Iniciar playback
|
||||
_send_to_ableton("start_playback", {}, timeout=10.0)
|
||||
|
||||
return _ok({
|
||||
"status": "success",
|
||||
"message": "Produccion profesional con coherencia espectral creada",
|
||||
"total_samples_analyzed": total_samples,
|
||||
"samples_used": len(samples_loaded),
|
||||
"tracks_created": len(tracks_created),
|
||||
"coherence_threshold": coherence_threshold,
|
||||
"coherence_scores_by_role": coherence_scores,
|
||||
"overall_coherence": round(overall_coherence, 3),
|
||||
"is_professional": overall_coherence >= 0.90,
|
||||
"tracks": tracks_created,
|
||||
"samples": samples_loaded[:20], # Primeros 20 para preview
|
||||
"project_bpm": bpm,
|
||||
"project_key": key,
|
||||
"style": style
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
logger.error(f"[SPECTRAL] Error: {str(e)}")
|
||||
logger.error(f"[SPECTRAL] Traceback: {traceback.format_exc()}")
|
||||
return _err(f"Error en produccion espectral: {str(e)}")
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# MAIN
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
Reference in New Issue
Block a user