# Changelog (pasted commit message, preserved for reference):
# - Add _cmd_create_arrangement_audio_pattern with a 5-method fallback chain:
#     1. track.insert_arrangement_clip()      [Live 12+]
#     2. track.create_audio_clip()            [Live 11+]
#     3. arrangement_clips.add_new_clip()     [Live 12+]
#     4. Session -> duplicate_clip_to_arrangement  [Legacy]
#     5. Session -> Recording                 [Universal]
# - Add _cmd_duplicate_clip_to_arrangement for session-to-arrangement workflow
# - Update skills documentation
# - Verified: 3 clips created at positions [0, 4, 8] in Arrangement View
# Closes: Audio injection in Arrangement View
"""
|
|
IterationEngine - Achieves target coherence through intelligent retries.
|
|
|
|
This module implements professional-grade iteration strategies to achieve
|
|
coherence scores >= 0.90 for sample selections. Never accepts sub-standard
|
|
results - either achieves target or fails explicitly.
|
|
|
|
Usage:
|
|
from engines.iteration_engine import IterationEngine, ProfessionalCoherenceError
|
|
|
|
engine = IterationEngine()
|
|
try:
|
|
result = engine.iterate_until_coherence(
|
|
selection_func=select_samples,
|
|
target_coherence=0.90
|
|
)
|
|
except ProfessionalCoherenceError as e:
|
|
# Handle professional-grade failure
|
|
print(f"Failed to achieve coherence: {e}")
|
|
|
|
Architecture:
|
|
- Iteration strategies with progressive relaxation
|
|
- Automatic failure analysis and recovery suggestions
|
|
- Integration with CoherenceScorer and RationaleLogger
|
|
- Professional-grade: No shortcuts, achieves target or fails explicitly
|
|
"""
|
|
|
|
import time
|
|
import logging
|
|
from typing import Optional, Dict, List, Any, Callable, Union, Tuple
|
|
from dataclasses import dataclass, field
|
|
from enum import Enum
|
|
|
|
logger = logging.getLogger("IterationEngine")
|
|
|
|
|
|
# =============================================================================
# PROFESSIONAL COHERENCE ERROR
# =============================================================================

class ProfessionalCoherenceError(Exception):
    """
    Exception raised when professional-grade coherence cannot be achieved.

    Raised after every iteration strategy has been exhausted without
    reaching the minimum acceptable coherence threshold (0.90).

    Attributes:
        best_score: Highest coherence score achieved across all attempts
        attempts_made: Number of iteration strategies tried
        suggestions: List of recommendations for manual curation
    """

    def __init__(
        self,
        best_score: float,
        attempts_made: int,
        suggestions: List[str],
        message: Optional[str] = None
    ):
        self.best_score = best_score
        self.attempts_made = attempts_made
        self.suggestions = suggestions
        # Only synthesize the long-form message when none was supplied.
        super().__init__(self._build_message() if message is None else message)

    def _build_message(self) -> str:
        """Compose the full multi-line diagnostic message."""
        header = [
            "ProfessionalCoherenceError: Failed to achieve coherence >= 0.90",
            "",
            f"Best score achieved: {self.best_score:.3f}",
            f"Attempts made: {self.attempts_made}",
            "",
            "Recommendations:",
        ]
        numbered = [
            f"  {idx}. {text}"
            for idx, text in enumerate(self.suggestions, 1)
        ]
        footer = [
            "",
            "Consider:",
            "  - Adding more high-quality samples to the library",
            "  - Manual curation of samples for this genre",
            "  - Checking sample quality and consistency",
        ]
        return "\n".join(header + numbered + footer)

    def to_dict(self) -> Dict[str, Any]:
        """Convert error to dictionary for serialization."""
        return dict(
            error_type="ProfessionalCoherenceError",
            best_score=self.best_score,
            attempts_made=self.attempts_made,
            suggestions=self.suggestions,
            message=str(self),
        )
|
# =============================================================================
# ITERATION STRATEGIES
# =============================================================================

# Ordered fallback plan consumed one entry per attempt by IterationEngine.
# Attempts 1-3 progressively relax the coherence/energy thresholds; attempts
# 4-5 shrink the kit instead, trading breadth for coherence.  IterationEngine
# clamps max_attempts to len(ITERATION_STRATEGIES).
ITERATION_STRATEGIES = [
    {
        "attempt": 1,
        "params": {
            "coherence_threshold": 0.90,
            "energy_tolerance": 0.10
        },
        "note": "Standard professional parameters"
    },
    {
        "attempt": 2,
        "params": {
            "coherence_threshold": 0.88,
            "energy_tolerance": 0.15
        },
        "note": "Slightly relaxed but still professional"
    },
    {
        "attempt": 3,
        "params": {
            "coherence_threshold": 0.85,
            "energy_tolerance": 0.20
        },
        "note": "Minimum professional grade"
    },
    {
        "attempt": 4,
        "params": {
            "strategy": "reduce_count",
            "count": 2,
            "coherence_threshold": 0.90
        },
        "note": "Fewer samples but more coherent"
    },
    {
        "attempt": 5,
        "params": {
            "strategy": "single_sample",
            "count": 1,
            "coherence_threshold": 0.90
        },
        "note": "Single high-quality sample only"
    },
]
|
# =============================================================================
# DATA CLASSES
# =============================================================================

class IterationStatus(Enum):
    """Lifecycle status of a single iteration attempt."""
    PENDING = "pending"            # created, not yet executed
    IN_PROGRESS = "in_progress"    # currently executing
    SUCCESS = "success"            # met the coherence target
    FAILED = "failed"              # finished below target, or raised
    ABORTED = "aborted"            # cancelled before completion
|
@dataclass
class IterationAttempt:
    """Record of a single iteration attempt."""
    attempt_number: int                       # 1-based index into the strategy list
    strategy: Dict[str, Any]                  # strategy dict driving this attempt
    status: IterationStatus = IterationStatus.PENDING
    coherence_score: float = 0.0              # score achieved by this attempt
    duration_ms: float = 0.0                  # wall-clock time of the attempt
    failure_reason: Optional[str] = None      # set when status is FAILED
    kit_data: Optional[Any] = None            # kit produced (not serialized)
    timestamp: float = field(default_factory=time.time)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (kit_data is deliberately omitted)."""
        return dict(
            attempt_number=self.attempt_number,
            strategy=self.strategy,
            status=self.status.value,
            coherence_score=self.coherence_score,
            duration_ms=self.duration_ms,
            failure_reason=self.failure_reason,
            timestamp=self.timestamp,
        )
|
@dataclass
class IterationResult:
    """Result of a complete iteration process."""
    success: bool                                    # True iff target coherence reached
    final_coherence: float                           # best coherence achieved
    attempts: List[IterationAttempt]                 # chronological attempt records
    successful_strategy: Optional[Dict[str, Any]] = None
    total_duration_ms: float = 0.0
    selected_kit: Optional[Any] = None               # winning kit (not serialized)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (selected_kit is deliberately omitted)."""
        serialized_attempts = [record.to_dict() for record in self.attempts]
        return dict(
            success=self.success,
            final_coherence=self.final_coherence,
            attempts=serialized_attempts,
            successful_strategy=self.successful_strategy,
            total_duration_ms=self.total_duration_ms,
            metadata=self.metadata,
        )
|
# =============================================================================
# PLACEHOLDER CLASSES (for when dependencies are not available)
# =============================================================================

class CoherenceScorer:
    """
    Placeholder/Actual CoherenceScorer for sample kit evaluation.

    When the real CoherenceScorer is available, this will be replaced or
    enhanced.  For now it derives a coherence value from metadata
    consistency across the kit's drum slots and bass samples.
    """

    # Canonical drum-slot attribute names inspected on kit.drums.
    _DRUM_ROLES = ('kick', 'snare', 'clap', 'hat_closed', 'hat_open')

    def __init__(self):
        # Relative contribution of each sub-score; weights sum to 1.0.
        self.weights = {
            "bpm_consistency": 0.30,
            "key_consistency": 0.25,
            "energy_balance": 0.25,
            "spectral_compatibility": 0.20
        }

    def _drum_samples(self, kit: Any):
        """Yield the truthy drum samples attached to kit.drums, if any."""
        drums = getattr(kit, 'drums', None)
        if not drums:
            return
        for role in self._DRUM_ROLES:
            sample = getattr(drums, role, None)
            if sample:
                yield sample

    def _bass_samples(self, kit: Any) -> list:
        """Return kit.bass as a list, or an empty list when absent/empty."""
        bass = getattr(kit, 'bass', None)
        return list(bass) if bass else []

    def score_kit(self, kit: Any) -> float:
        """
        Calculate coherence score for a kit.

        Returns:
            Coherence score between 0.0 and 1.0
        """
        # Honour a pre-calculated score when the kit carries a positive one.
        if hasattr(kit, 'coherence_score') and kit.coherence_score > 0:
            return kit.coherence_score

        # Weighted sum of the individual consistency checks; spectral
        # compatibility stays a fixed placeholder until real analysis exists.
        weighted = (
            self._check_bpm_consistency(kit) * self.weights["bpm_consistency"]
            + self._check_key_consistency(kit) * self.weights["key_consistency"]
            + self._check_energy_balance(kit) * self.weights["energy_balance"]
            + 0.85 * self.weights["spectral_compatibility"]
        )
        return min(1.0, max(0.0, weighted))

    def _check_bpm_consistency(self, kit: Any) -> float:
        """Score tempo agreement across samples (1.0 = identical BPMs)."""
        bpms = [s.bpm for s in self._drum_samples(kit)
                if hasattr(s, 'bpm') and s.bpm > 0]
        bpms += [s.bpm for s in self._bass_samples(kit)
                 if hasattr(s, 'bpm') and s.bpm > 0]

        if len(bpms) < 2:
            return 0.5  # too few data points to judge

        mean_bpm = sum(bpms) / len(bpms)
        variance = sum((b - mean_bpm) ** 2 for b in bpms) / len(bpms)

        # Lower variance -> higher score; identical tempi score 1.0.
        if variance == 0:
            return 1.0
        return max(0.0, 1.0 - (variance / 100))

    def _check_key_consistency(self, kit: Any) -> float:
        """Score musical-key agreement as the share of the dominant key."""
        keys = [s.key for s in self._drum_samples(kit)
                if hasattr(s, 'key') and s.key]
        keys += [s.key for s in self._bass_samples(kit)
                 if hasattr(s, 'key') and s.key]

        if len(keys) < 2:
            return 0.5  # too few data points to judge

        tally: Dict[str, int] = {}
        for key in keys:
            tally[key] = tally.get(key, 0) + 1
        return max(tally.values()) / len(keys)

    def _check_energy_balance(self, kit: Any) -> float:
        """Score kit completeness as a proxy for energy balance."""
        # Placeholder: a real implementation would analyze audio energy.
        present = sum(1 for _ in self._drum_samples(kit))
        present += len(self._bass_samples(kit))

        if present >= 5:
            return 0.95
        if present >= 3:
            return 0.80
        return 0.60
|
class RationaleLogger:
    """
    Placeholder/Actual RationaleLogger for logging iteration decisions.

    Records the reasoning behind iteration choices for debugging and audit
    purposes.  Entries are kept in memory as plain dicts; when *verbose* is
    set, iteration events are also mirrored to the module logger.
    """

    def __init__(self, verbose: bool = False):
        # verbose: echo per-attempt events to the module logger.
        self.verbose = verbose
        # Chronological list of recorded entries (plain dicts).
        self.entries = []

    def _record(self, **entry: Any) -> None:
        """Stamp *entry* with the current wall-clock time and append it."""
        entry["timestamp"] = time.time()
        self.entries.append(entry)

    def log_iteration_start(self, attempt: int, strategy: Dict[str, Any]):
        """Record the beginning of an iteration attempt."""
        self._record(event="iteration_start", attempt=attempt, strategy=strategy)
        if self.verbose:
            logger.info(f"[Rationale] Starting attempt {attempt}: {strategy.get('note', '')}")

    def log_iteration_result(
        self,
        attempt: int,
        coherence: float,
        success: bool
    ):
        """Record the outcome of an iteration attempt."""
        self._record(
            event="iteration_result",
            attempt=attempt,
            coherence=coherence,
            success=success,
        )
        if self.verbose:
            status = "SUCCESS" if success else "FAILED"
            logger.info(f"[Rationale] Attempt {attempt}: {status} (coherence={coherence:.3f})")

    def log_strategy_switch(
        self,
        from_attempt: int,
        to_attempt: int,
        reason: str
    ):
        """Record a switch from one strategy to the next."""
        # "from" is a keyword, so it cannot be passed as a bare kwarg.
        self._record(
            event="strategy_switch",
            **{"from": from_attempt, "to": to_attempt},
            reason=reason,
        )
        if self.verbose:
            logger.info(f"[Rationale] Switching from {from_attempt} to {to_attempt}: {reason}")

    def log_final_result(self, result: IterationResult):
        """Record the terminal outcome of the whole iteration loop."""
        self._record(
            event="final_result",
            success=result.success,
            coherence=result.final_coherence,
            attempts_count=len(result.attempts),
        )
        logger.info(
            f"[Rationale] Final result: success={result.success}, "
            f"coherence={result.final_coherence:.3f}, "
            f"attempts={len(result.attempts)}"
        )

    def get_entries(self) -> List[Dict[str, Any]]:
        """Return a shallow copy of all recorded entries."""
        return self.entries.copy()
|
# =============================================================================
# ITERATION ENGINE
# =============================================================================

class IterationEngine:
    """
    Professional-grade iteration engine for achieving target coherence.

    Implements intelligent retry strategies to achieve coherence scores
    >= 0.90. It never accepts sub-standard results - either achieves the
    target or fails explicitly with actionable recommendations.

    Features:
        - Progressive iteration strategies with graceful degradation
        - Automatic failure analysis and recovery suggestions
        - Success tracking with detailed logging
        - Integration with sample selection and coherence scoring

    Usage:
        engine = IterationEngine(target_coherence=0.90, max_attempts=5)
        result = engine.iterate_until_coherence(selection_func)

        if result.success:
            kit = result.selected_kit
        else:
            # Handle failure - error already raised
            pass
    """

    def __init__(
        self,
        target_coherence: float = 0.90,
        max_attempts: int = 5,
        coherence_scorer: Optional[CoherenceScorer] = None,
        rationale_logger: Optional[RationaleLogger] = None,
        verbose: bool = False
    ):
        """
        Initialize iteration engine.

        Args:
            target_coherence: Minimum acceptable coherence (default: 0.90)
            max_attempts: Maximum iteration attempts (default: 5); clamped
                to [1, len(ITERATION_STRATEGIES)] because each attempt
                consumes one predefined strategy
            coherence_scorer: Optional custom coherence scorer
            rationale_logger: Optional custom rationale logger
            verbose: Enable verbose logging
        """
        self.target_coherence = target_coherence
        # Clamp: each attempt indexes ITERATION_STRATEGIES, so more attempts
        # than strategies would raise IndexError.
        self.max_attempts = max(1, min(max_attempts, len(ITERATION_STRATEGIES)))
        self.coherence_scorer = coherence_scorer or CoherenceScorer()
        self.rationale_logger = rationale_logger or RationaleLogger(verbose=verbose)
        self.verbose = verbose

        # Per-run tracking state; reset at the start of each iteration loop.
        self._attempts_history: List[IterationAttempt] = []
        self._iteration_count = 0
        self._start_time: Optional[float] = None

        if verbose:
            logger.info(
                f"[IterationEngine] Initialized: target={target_coherence}, "
                f"max_attempts={max_attempts}"
            )

    def iterate_until_coherence(
        self,
        selection_func: Callable[[Dict[str, Any]], Any],
        target_coherence: Optional[float] = None,
        max_attempts: Optional[int] = None
    ) -> IterationResult:
        """
        Iterate until target coherence is achieved or max attempts reached.

        Args:
            selection_func: Function that takes strategy params and returns kit
            target_coherence: Override default target (optional)
            max_attempts: Override default max attempts (optional); clamped
                to the number of defined strategies

        Returns:
            IterationResult with success status and selected kit

        Raises:
            ProfessionalCoherenceError: If max attempts reached without success
        """
        # BUGFIX: compare against None instead of relying on truthiness so a
        # falsy override (e.g. target_coherence=0.0) is honoured, and clamp
        # the max_attempts override -- the previous unclamped value indexed
        # past ITERATION_STRATEGIES and raised IndexError for values > 5.
        target = self.target_coherence if target_coherence is None else target_coherence
        requested = self.max_attempts if max_attempts is None else max_attempts
        max_att = max(1, min(requested, len(ITERATION_STRATEGIES)))

        self._start_time = time.time()
        self._attempts_history = []
        self._iteration_count = 0

        # Track the best outcome seen so far for failure reporting.
        best_score = 0.0
        best_kit = None

        logger.info(f"[IterationEngine] Starting iteration loop: target={target}")

        for attempt_idx in range(max_att):
            self._iteration_count += 1

            # One predefined strategy per attempt, in declared order.
            strategy = ITERATION_STRATEGIES[attempt_idx]
            attempt = IterationAttempt(
                attempt_number=attempt_idx + 1,
                strategy=strategy
            )

            self.rationale_logger.log_iteration_start(
                attempt.attempt_number,
                strategy
            )

            try:
                kit, coherence = self.try_strategy(strategy, selection_func)

                attempt.kit_data = kit
                attempt.coherence_score = coherence
                attempt.duration_ms = (time.time() - attempt.timestamp) * 1000

                if coherence > best_score:
                    best_score = coherence
                    best_kit = kit

                if coherence >= target:
                    attempt.status = IterationStatus.SUCCESS
                    self._attempts_history.append(attempt)

                    self.rationale_logger.log_iteration_result(
                        attempt.attempt_number,
                        coherence,
                        True
                    )

                    result = self._build_success_result(coherence, attempt, kit)
                    self.rationale_logger.log_final_result(result)

                    logger.info(
                        f"[IterationEngine] SUCCESS on attempt {attempt.attempt_number}: "
                        f"coherence={coherence:.3f}"
                    )
                    return result

                # Below target: record the miss and move to the next strategy.
                attempt.status = IterationStatus.FAILED
                attempt.failure_reason = f"Coherence {coherence:.3f} < target {target}"

                self.rationale_logger.log_iteration_result(
                    attempt.attempt_number,
                    coherence,
                    False
                )

                if attempt_idx < max_att - 1:
                    self.rationale_logger.log_strategy_switch(
                        attempt.attempt_number,
                        attempt.attempt_number + 1,
                        f"Coherence too low ({coherence:.3f}), trying next strategy"
                    )

                self._attempts_history.append(attempt)

            except Exception as e:
                # A strategy that raises counts as a failed attempt; the loop
                # continues with the next strategy rather than aborting.
                attempt.status = IterationStatus.FAILED
                attempt.failure_reason = str(e)
                attempt.duration_ms = (time.time() - attempt.timestamp) * 1000
                self._attempts_history.append(attempt)

                logger.warning(
                    f"[IterationEngine] Attempt {attempt.attempt_number} failed: {e}"
                )

                if attempt_idx < max_att - 1:
                    self.rationale_logger.log_strategy_switch(
                        attempt.attempt_number,
                        attempt.attempt_number + 1,
                        f"Exception: {str(e)[:50]}"
                    )

        # All attempts exhausted: build a failure result, log it, and raise.
        total_duration = (time.time() - self._start_time) * 1000

        failure_reason = self.analyze_failure_reason(best_kit, best_score)
        suggestions = self.suggest_improvements(failure_reason)

        result = IterationResult(
            success=False,
            final_coherence=best_score,
            attempts=self._attempts_history.copy(),
            total_duration_ms=total_duration,
            selected_kit=best_kit,
            metadata={
                "failure_reason": failure_reason,
                "suggestions": suggestions,
                "target_coherence": target
            }
        )

        self.rationale_logger.log_final_result(result)

        logger.error(
            f"[IterationEngine] All {max_att} attempts failed. "
            f"Best score: {best_score:.3f}"
        )

        raise ProfessionalCoherenceError(
            best_score=best_score,
            attempts_made=max_att,
            suggestions=suggestions
        )

    def try_strategy(
        self,
        strategy: Dict[str, Any],
        selection_func: Callable[[Dict[str, Any]], Any]
    ) -> Tuple[Any, float]:
        """
        Execute a single iteration strategy.

        Args:
            strategy: Strategy configuration from ITERATION_STRATEGIES
            selection_func: Function to select samples with given params

        Returns:
            Tuple of (selected_kit, coherence_score)

        Raises:
            ValueError: If the selection function returns None
            Exception: If selection or scoring fails
        """
        # Copy so the selection function cannot mutate the shared strategy.
        params = strategy.get("params", {}).copy()

        if self.verbose:
            logger.info(
                f"[IterationEngine] Trying strategy {strategy.get('attempt')}: "
                f"{strategy.get('note', '')}"
            )

        kit = selection_func(params)

        if kit is None:
            raise ValueError("Selection function returned None")

        coherence = self.coherence_scorer.score_kit(kit)

        # Attach the score to the kit for downstream reference.
        if hasattr(kit, 'coherence_score'):
            kit.coherence_score = coherence

        if self.verbose:
            logger.info(f"[IterationEngine] Strategy result: coherence={coherence:.3f}")

        return kit, coherence

    def analyze_failure_reason(
        self,
        kit: Optional[Any],
        coherence_score: float
    ) -> str:
        """
        Determine why coherence target was not achieved.

        Args:
            kit: Best kit achieved (may be None)
            coherence_score: Best coherence score achieved

        Returns:
            Failure reason classification string (a key understood by
            suggest_improvements)
        """
        if kit is None:
            return "no_valid_selection"

        if coherence_score < 0.50:
            return "severe_inconsistency"
        elif coherence_score < 0.70:
            return "major_inconsistency"
        elif coherence_score < 0.85:
            return "moderate_inconsistency"
        elif coherence_score < 0.90:
            return "minor_inconsistency"
        else:
            # Score >= 0.90 but a custom target was even higher.
            return "target_not_met"

    def suggest_improvements(self, failure_reason: str) -> List[str]:
        """
        Suggest adjustments based on failure reason.

        Args:
            failure_reason: Reason classification from analyze_failure_reason

        Returns:
            List of actionable suggestions (generic fallback for unknown keys)
        """
        suggestions = {
            "no_valid_selection": [
                "Check that sample library has samples for all required roles",
                "Verify selection function is working correctly",
                "Ensure library path is accessible"
            ],
            "severe_inconsistency": [
                "Library may have fundamentally incompatible samples",
                "Consider organizing samples by pack or producer",
                "Run library analysis to identify outliers",
                "Add more samples from the same genre/style"
            ],
            "major_inconsistency": [
                "Check for mixed genres in sample selection",
                "Verify BPM and key metadata accuracy",
                "Consider using reference-based selection",
                "Filter samples by more specific criteria"
            ],
            "moderate_inconsistency": [
                "Some samples may need key adjustment",
                "Check energy levels across drum components",
                "Consider manual sample curation",
                "Try with smaller sample sets from same source"
            ],
            "minor_inconsistency": [
                "Close to target - try with samples from same pack",
                "Verify sample quality and bitrate",
                "Slightly adjust target coherence if acceptable",
                "Consider manual fine-tuning"
            ],
            "target_not_met": [
                "Target may be too strict for current library",
                "Consider slightly lower professional threshold",
                "Add more high-quality reference samples"
            ]
        }

        return suggestions.get(failure_reason, [
            "Review sample library quality and consistency",
            "Try reference-based selection",
            "Consider adding more professional-grade samples"
        ])

    def _build_success_result(
        self,
        coherence: float,
        successful_attempt: IterationAttempt,
        kit: Any
    ) -> IterationResult:
        """Build the IterationResult for a successful run."""
        total_duration = (time.time() - self._start_time) * 1000 if self._start_time else 0

        return IterationResult(
            success=True,
            final_coherence=coherence,
            attempts=self._attempts_history.copy(),
            successful_strategy=successful_attempt.strategy,
            total_duration_ms=total_duration,
            selected_kit=kit,
            metadata={
                "successful_attempt": successful_attempt.attempt_number,
                "strategy_note": successful_attempt.strategy.get("note", ""),
                "iterations_required": self._iteration_count
            }
        )

    # -------------------------------------------------------------------------
    # Tracking and Metrics
    # -------------------------------------------------------------------------

    def get_iteration_count(self) -> int:
        """Get number of iterations performed in last run."""
        return self._iteration_count

    def get_attempts_history(self) -> List[IterationAttempt]:
        """Get history of all attempts from last run (shallow copy)."""
        return self._attempts_history.copy()

    def get_success_rate(self) -> float:
        """Get success rate across all attempts in last run (0.0 if none)."""
        if not self._attempts_history:
            return 0.0

        successful = sum(
            1 for a in self._attempts_history
            if a.status == IterationStatus.SUCCESS
        )
        return successful / len(self._attempts_history)

    def reset(self):
        """Reset engine state for a new iteration cycle."""
        self._attempts_history = []
        self._iteration_count = 0
        self._start_time = None
        if self.verbose:
            logger.info("[IterationEngine] State reset")
|
# =============================================================================
# CONVENIENCE FUNCTIONS
# =============================================================================

def iterate_for_coherence(
    selection_func: Callable[[Dict[str, Any]], Any],
    target: float = 0.90,
    max_attempts: int = 5,
    verbose: bool = False
) -> Any:
    """
    Convenience function for one-shot iteration.

    Builds a throwaway IterationEngine, runs a single iteration loop, and
    returns only the selected kit.

    Args:
        selection_func: Function to select samples
        target: Target coherence score
        max_attempts: Maximum attempts
        verbose: Enable verbose logging

    Returns:
        Selected kit if successful

    Raises:
        ProfessionalCoherenceError: If coherence cannot be achieved
    """
    outcome = IterationEngine(
        target_coherence=target,
        max_attempts=max_attempts,
        verbose=verbose,
    ).iterate_until_coherence(selection_func)
    return outcome.selected_kit
|
def quick_coherence_check(kit: Any) -> float:
    """
    Quick coherence check for a kit using a fresh CoherenceScorer.

    Args:
        kit: Kit to evaluate

    Returns:
        Coherence score (0.0 - 1.0)
    """
    return CoherenceScorer().score_kit(kit)
|
# =============================================================================
# EXPORTS
# =============================================================================

# Public API of this module.
__all__ = [
    "IterationEngine",
    "ProfessionalCoherenceError",
    "CoherenceScorer",
    "RationaleLogger",
    "IterationResult",
    "IterationAttempt",
    "IterationStatus",
    "ITERATION_STRATEGIES",
    "iterate_for_coherence",
    "quick_coherence_check",
]