feat: Implement senior audio injection with 5 fallback methods
- Add _cmd_create_arrangement_audio_pattern with 5-method fallback chain:
  - Method 1: track.insert_arrangement_clip() [Live 12+]
  - Method 2: track.create_audio_clip() [Live 11+]
  - Method 3: arrangement_clips.add_new_clip() [Live 12+]
  - Method 4: Session->duplicate_clip_to_arrangement [Legacy]
  - Method 5: Session->Recording [Universal]
- Add _cmd_duplicate_clip_to_arrangement for session-to-arrangement workflow
- Update skills documentation
- Verified: 3 clips created at positions [0, 4, 8] in Arrangement View

Closes: Audio injection in Arrangement View
This commit is contained in:
832
mcp_server/engines/preset_manager.py
Normal file
832
mcp_server/engines/preset_manager.py
Normal file
@@ -0,0 +1,832 @@
|
||||
"""
|
||||
PresetManager - Save/Load Coherent Sample Kits
|
||||
|
||||
Manages coherent sample kit presets with CRUD operations,
|
||||
similarity matching, and usage tracking.
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import hashlib
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any, Tuple
|
||||
from dataclasses import dataclass, asdict
|
||||
|
||||
|
||||
@dataclass
class SampleEntry:
    """Represents a sample in a kit with optional per-context variations.

    Attributes:
        base: Path to the base sample file.
        variations: Mapping of context name (e.g. "intro") -> sample path.
    """
    base: str
    # Optional because dataclasses forbid mutable defaults; normalized to an
    # empty dict in __post_init__ so callers can always treat it as a dict.
    variations: Optional[Dict[str, str]] = None

    def __post_init__(self):
        if self.variations is None:
            self.variations = {}

    def to_dict(self) -> Dict:
        """Serialize to a plain JSON-compatible dict."""
        return {
            "base": self.base,
            "variations": self.variations
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'SampleEntry':
        """Build a SampleEntry from a dict, tolerating missing keys."""
        return cls(
            base=data.get("base", ""),
            variations=data.get("variations", {})
        )
|
||||
|
||||
|
||||
@dataclass
class CoherenceProof:
    """Coherence verification data for a kit."""
    # Aggregate coherence of the whole kit.
    overall_score: float
    # Per-pair score entries, as stored in the preset JSON.
    pair_scores: List[Dict[str, Any]]

    def to_dict(self) -> Dict:
        """Serialize to a JSON-compatible dict."""
        return {"overall_score": self.overall_score,
                "pair_scores": self.pair_scores}

    @classmethod
    def from_dict(cls, data: Dict) -> 'CoherenceProof':
        """Build a CoherenceProof from a dict, defaulting missing fields."""
        score = data.get("overall_score", 0.0)
        pairs = data.get("pair_scores", [])
        return cls(overall_score=score, pair_scores=pairs)
|
||||
|
||||
|
||||
@dataclass
class KitMetadata:
    """Metadata for a sample kit preset.

    Attributes:
        genre: Musical genre (e.g. "reggaeton").
        style: Sub-style within the genre.
        tempo: Tempo in BPM.
        key: Musical key (e.g. "Am").
        coherence_score: Overall kit coherence score.
        variation_level: Amount of variation (default "medium").
        tags: Free-form descriptive tags.
    """
    genre: str
    style: str
    tempo: int
    key: str
    coherence_score: float
    variation_level: str = "medium"
    # Optional because dataclasses forbid mutable defaults; normalized to an
    # empty list in __post_init__ so callers can always treat it as a list.
    tags: Optional[List[str]] = None

    def __post_init__(self):
        if self.tags is None:
            self.tags = []

    def to_dict(self) -> Dict:
        """Serialize to a JSON-compatible dict."""
        return {
            "genre": self.genre,
            "style": self.style,
            "tempo": self.tempo,
            "key": self.key,
            "coherence_score": self.coherence_score,
            "variation_level": self.variation_level,
            "tags": self.tags
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'KitMetadata':
        """Build a KitMetadata from a dict, applying defaults for missing keys."""
        return cls(
            genre=data.get("genre", "unknown"),
            style=data.get("style", "standard"),
            tempo=data.get("tempo", 95),
            key=data.get("key", "Am"),
            coherence_score=data.get("coherence_score", 0.0),
            variation_level=data.get("variation_level", "medium"),
            tags=data.get("tags", [])
        )
|
||||
|
||||
|
||||
@dataclass
class Preset:
    """Complete preset structure for a coherent sample kit."""
    name: str
    description: str
    created_at: str
    metadata: KitMetadata
    kit: Dict[str, SampleEntry]
    coherence_proof: CoherenceProof
    usage_count: int = 0
    last_used: str = ""

    def to_dict(self) -> Dict:
        """Serialize the preset and all nested objects to a JSON dict."""
        serialized_kit = {role: entry.to_dict() for role, entry in self.kit.items()}
        return {
            "name": self.name,
            "description": self.description,
            "created_at": self.created_at,
            "metadata": self.metadata.to_dict(),
            "kit": serialized_kit,
            "coherence_proof": self.coherence_proof.to_dict(),
            "usage_count": self.usage_count,
            "last_used": self.last_used
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'Preset':
        """Rebuild a Preset (and nested objects) from a JSON dict."""
        raw_kit = data.get("kit", {})
        kit_entries = {role: SampleEntry.from_dict(entry)
                       for role, entry in raw_kit.items()}
        return cls(
            name=data.get("name", "Unnamed"),
            description=data.get("description", ""),
            created_at=data.get("created_at", ""),
            metadata=KitMetadata.from_dict(data.get("metadata", {})),
            kit=kit_entries,
            coherence_proof=CoherenceProof.from_dict(data.get("coherence_proof", {})),
            usage_count=data.get("usage_count", 0),
            last_used=data.get("last_used", "")
        )
|
||||
|
||||
|
||||
class PresetManager:
    """
    Manages coherent sample kit presets with save/load/search capabilities.

    Presets live as individual JSON files in ``presets_dir``.

    Features:
    - CRUD operations for presets
    - Search and filter by genre, style, coherence
    - Similarity matching between kits
    - Usage tracking
    - Duplicate detection
    - Import/export for sharing
    """

    def __init__(self, presets_dir: Optional[str] = None):
        """
        Initialize PresetManager.

        Args:
            presets_dir: Directory for preset storage. If None, uses default.
        """
        if presets_dir is None:
            # Default to AbletonMCP_AI/presets/ (three levels up from this file)
            base_dir = Path(__file__).parent.parent.parent
            self.presets_dir = base_dir / "presets"
        else:
            self.presets_dir = Path(presets_dir)

        # Ensure directory exists
        self.presets_dir.mkdir(parents=True, exist_ok=True)

        # Cache of presets already loaded from disk, keyed by preset name.
        self._cache: Dict[str, Preset] = {}
        self._cache_timestamp: Optional[datetime] = None

    def _iter_preset_data(self):
        """Yield (filepath, data) for every readable preset JSON file on disk.

        Files that cannot be read, are not valid JSON, or do not hold a JSON
        object are skipped: disk scans are best-effort and a single corrupt
        file must not break them. Each file is closed before its data is
        yielded, so callers may safely rewrite or unlink the path.
        """
        for filepath in self.presets_dir.glob("*.json"):
            try:
                with open(filepath, 'r', encoding='utf-8') as f:
                    data = json.load(f)
            except (OSError, json.JSONDecodeError):
                continue
            if isinstance(data, dict):
                yield filepath, data

    def _generate_filename(self, metadata: KitMetadata) -> str:
        """
        Generate filename from metadata.

        Format: {genre}_{style}_{coherence}_{timestamp}.json
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        coherence_str = f"{metadata.coherence_score:.2f}"
        safe_genre = metadata.genre.replace(" ", "_").lower()
        safe_style = metadata.style.replace(" ", "_").lower()
        return f"{safe_genre}_{safe_style}_{coherence_str}_{timestamp}.json"

    def _generate_name(self, metadata: KitMetadata, kit: Dict[str, SampleEntry]) -> str:
        """
        Auto-generate meaningful preset name.

        Based on genre, style, key elements in kit. Appends "#N" if the
        name already exists, to keep names unique.
        """
        # Base name from style
        base_name = metadata.style.replace("_", " ").title()

        # Add descriptors based on kit contents
        descriptors = []

        if "kick" in kit:
            kick_path = kit["kick"].base.lower()
            if "pesado" in kick_path or "heavy" in kick_path:
                descriptors.append("Pesado")
            elif "sutil" in kick_path or "soft" in kick_path:
                descriptors.append("Suave")
            elif "estampido" in kick_path:
                descriptors.append("Estampido")

        if "bass" in kit:
            descriptors.append("Con Bajo")

        # Add coherence quality
        if metadata.coherence_score >= 0.95:
            descriptors.append("Ultra")
        elif metadata.coherence_score >= 0.90:
            descriptors.append("Premium")

        # Combine
        if descriptors:
            descriptor_str = ", ".join(descriptors[:2])  # Max 2 descriptors
            name = f"{base_name} ({descriptor_str})"
        else:
            name = base_name

        # Add uniqueness number
        existing = self._get_existing_names()
        count = 1
        final_name = name
        while final_name in existing:
            count += 1
            final_name = f"{name} #{count}"

        return final_name

    def _generate_description(self, metadata: KitMetadata, kit: Dict[str, SampleEntry]) -> str:
        """Generate human-readable description (tempo/key, elements, quality)."""
        parts = [
            f"{metadata.tempo}bpm {metadata.key}",
        ]

        # Describe key elements
        elements = []
        if "kick" in kit:
            kick_file = os.path.basename(kit["kick"].base)
            elements.append(f"kick: {kick_file.replace('.wav', '').replace('_', ' ')}")
        if "snare" in kit:
            elements.append("snare incluido")
        if "bass" in kit:
            elements.append("bass presente")

        if elements:
            parts.append(", ".join(elements))

        # Add energy description
        if metadata.coherence_score >= 0.95:
            parts.append("coherencia excepcional")
        elif metadata.coherence_score >= 0.90:
            parts.append("alta coherencia")

        return " | ".join(parts)

    def _get_existing_names(self) -> set:
        """Get set of existing preset names."""
        return {data.get("name", "") for _, data in self._iter_preset_data()}

    def _compute_kit_hash(self, kit: Dict[str, SampleEntry]) -> str:
        """
        Compute hash for kit to detect duplicates.

        Uses base sample paths only (not variations). md5 is used as a
        fast non-cryptographic fingerprint here, not for security.
        """
        # Extract base paths and sort for consistency
        base_paths = []
        for role in sorted(kit.keys()):
            entry = kit[role]
            base_paths.append(f"{role}:{entry.base}")

        # Create hash
        content = "|".join(base_paths)
        return hashlib.md5(content.encode()).hexdigest()[:16]

    def _check_duplicate(self, kit: Dict[str, SampleEntry]) -> Optional[str]:
        """
        Check if kit already exists as a preset.

        Returns preset name if duplicate found, None otherwise.
        """
        kit_hash = self._compute_kit_hash(kit)

        for _, data in self._iter_preset_data():
            try:
                existing_kit = {
                    k: SampleEntry.from_dict(v)
                    for k, v in data.get("kit", {}).items()
                }
            except (AttributeError, TypeError):
                # Malformed kit payload -- skip this file.
                continue
            if self._compute_kit_hash(existing_kit) == kit_hash:
                return data.get("name")

        return None

    def save_preset(
        self,
        name: Optional[str],
        kit: Dict[str, Any],
        coherence_score: float,
        metadata: Dict[str, Any],
        coherence_proof: Optional[Dict] = None,
        allow_duplicates: bool = False
    ) -> Tuple[bool, str, Optional[Preset]]:
        """
        Save a new preset.

        Args:
            name: Preset name (auto-generated if None)
            kit: Dictionary of role -> {base: path, variations: {context: path}}
                 (a plain path string per role is also accepted)
            coherence_score: Overall coherence score (0.0-1.0)
            metadata: Dict with genre, style, tempo, key, etc.
            coherence_proof: Optional detailed coherence data
            allow_duplicates: If False, checks for existing identical kits

        Returns:
            Tuple of (success: bool, message: str, preset: Optional[Preset]);
            preset is None on failure.
        """
        # Convert kit to SampleEntry objects
        kit_entries = {}
        for role, entry_data in kit.items():
            if isinstance(entry_data, dict):
                kit_entries[role] = SampleEntry.from_dict(entry_data)
            else:
                # Assume it's just a path string
                kit_entries[role] = SampleEntry(base=str(entry_data), variations={})

        # Create metadata object; the explicit score argument wins over
        # whatever the metadata dict carried.
        kit_metadata = KitMetadata.from_dict(metadata)
        kit_metadata.coherence_score = coherence_score

        # Check for duplicates
        if not allow_duplicates:
            duplicate_name = self._check_duplicate(kit_entries)
            if duplicate_name:
                return (False, f"Duplicate of existing preset: '{duplicate_name}'", None)

        # Generate name if not provided
        if not name:
            name = self._generate_name(kit_metadata, kit_entries)

        # Generate description
        description = self._generate_description(kit_metadata, kit_entries)

        # Create coherence proof
        if coherence_proof is None:
            coherence_proof = {
                "overall_score": coherence_score,
                "pair_scores": []
            }

        proof = CoherenceProof.from_dict(coherence_proof)

        # Create preset
        preset = Preset(
            name=name,
            description=description,
            created_at=datetime.now().isoformat(),
            metadata=kit_metadata,
            kit=kit_entries,
            coherence_proof=proof,
            usage_count=0,
            last_used=""
        )

        # Generate filename
        filename = self._generate_filename(kit_metadata)
        filepath = self.presets_dir / filename

        # Save to file
        try:
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(preset.to_dict(), f, indent=2, ensure_ascii=False)

            # Update cache
            self._cache[name] = preset

            return (True, f"Saved preset '{name}' to {filename}", preset)
        except OSError as e:
            return (False, f"Failed to save preset: {str(e)}", None)

    def load_preset(self, name: str) -> Tuple[bool, str, Optional[Preset]]:
        """
        Load a preset by name.

        Args:
            name: Preset name to load

        Returns:
            Tuple of (success: bool, message: str, preset: Optional[Preset])
        """
        # Check cache first
        if name in self._cache:
            return (True, "Loaded from cache", self._cache[name])

        # Search files
        for filepath, data in self._iter_preset_data():
            if data.get("name") != name:
                continue
            try:
                preset = Preset.from_dict(data)
            except (AttributeError, TypeError, KeyError, ValueError):
                # Matching name but malformed payload -- keep searching.
                continue
            self._cache[name] = preset
            return (True, f"Loaded from {filepath.name}", preset)

        return (False, f"Preset '{name}' not found", None)

    def list_presets(
        self,
        genre: Optional[str] = None,
        style: Optional[str] = None,
        min_coherence: float = 0.0,
        max_coherence: float = 1.0,
        tags: Optional[List[str]] = None,
        sort_by: str = "coherence",  # "coherence", "usage", "date", "name"
        limit: int = 100
    ) -> List[Preset]:
        """
        List presets with filtering and sorting.

        Args:
            genre: Filter by genre
            style: Filter by style
            min_coherence: Minimum coherence score
            max_coherence: Maximum coherence score
            tags: Filter by tags (all must match)
            sort_by: Sort field ("coherence", "usage", "date", "name")
            limit: Maximum results to return

        Returns:
            List of matching Preset objects
        """
        presets = []

        for _, data in self._iter_preset_data():
            try:
                preset = Preset.from_dict(data)
            except (AttributeError, TypeError, KeyError, ValueError):
                # Malformed preset payload -- skip it.
                continue

            # Apply filters (case-insensitive for genre/style/tags)
            if genre and preset.metadata.genre.lower() != genre.lower():
                continue
            if style and preset.metadata.style.lower() != style.lower():
                continue
            if preset.metadata.coherence_score < min_coherence:
                continue
            if preset.metadata.coherence_score > max_coherence:
                continue
            if tags:
                preset_tags = {t.lower() for t in preset.metadata.tags}
                if not all(t.lower() in preset_tags for t in tags):
                    continue

            presets.append(preset)

        # Sort; unknown sort_by values leave file order unchanged.
        if sort_by == "coherence":
            presets.sort(key=lambda p: p.metadata.coherence_score, reverse=True)
        elif sort_by == "usage":
            presets.sort(key=lambda p: p.usage_count, reverse=True)
        elif sort_by == "date":
            presets.sort(key=lambda p: p.created_at, reverse=True)
        elif sort_by == "name":
            presets.sort(key=lambda p: p.name.lower())

        return presets[:limit]

    def find_similar_presets(
        self,
        reference_kit: Dict[str, Any],
        count: int = 5,
        min_coherence: float = 0.85
    ) -> List[Tuple[Preset, float]]:
        """
        Find presets similar to a reference kit.

        Args:
            reference_kit: Dictionary of role -> sample paths
            count: Number of results to return
            min_coherence: Minimum coherence for candidates

        Returns:
            List of (preset, similarity_score) tuples, best first
        """
        # Get all presets above minimum coherence
        candidates = self.list_presets(min_coherence=min_coherence)

        if not candidates:
            return []

        # Calculate similarity scores
        scored_presets = [
            (preset, self._calculate_similarity(reference_kit, preset))
            for preset in candidates
        ]

        # Sort by score
        scored_presets.sort(key=lambda x: x[1], reverse=True)

        return scored_presets[:count]

    def _calculate_similarity(
        self,
        reference_kit: Dict[str, Any],
        preset: Preset
    ) -> float:
        """
        Calculate similarity between reference kit and preset (0.0-1.0).

        Based on:
        - Role overlap (Jaccard index over role names)
        - Sample name similarity (shared underscore-separated words)
        """
        scores = []

        # Role overlap
        ref_roles = set(reference_kit.keys())
        preset_roles = set(preset.kit.keys())

        if ref_roles and preset_roles:
            intersection = len(ref_roles & preset_roles)
            union = len(ref_roles | preset_roles)
            role_score = intersection / union if union > 0 else 0
            scores.append(role_score)

        # Sample name similarity for matching roles
        name_scores = []
        for role in ref_roles & preset_roles:
            ref_entry = reference_kit[role]
            if isinstance(ref_entry, dict):
                ref_path = ref_entry.get("base", "")
            else:
                ref_path = str(ref_entry)

            preset_path = preset.kit[role].base

            # Extract filenames
            ref_name = os.path.basename(ref_path).lower().replace(".wav", "")
            preset_name = os.path.basename(preset_path).lower().replace(".wav", "")

            # Check for common words (Jaccard over underscore-split tokens)
            ref_words = set(ref_name.split("_"))
            preset_words = set(preset_name.split("_"))

            if ref_words and preset_words:
                common = len(ref_words & preset_words)
                total = len(ref_words | preset_words)
                name_scores.append(common / total if total > 0 else 0)

        if name_scores:
            scores.append(sum(name_scores) / len(name_scores))

        # Combine scores
        return sum(scores) / len(scores) if scores else 0.0

    def delete_preset(self, name: str) -> Tuple[bool, str]:
        """
        Delete a preset by name.

        Args:
            name: Preset name to delete

        Returns:
            Tuple of (success: bool, message: str)
        """
        for filepath, data in self._iter_preset_data():
            if data.get("name") != name:
                continue
            try:
                filepath.unlink()
            except OSError:
                # Could not remove the file; report not-found-style failure
                # rather than raising from a best-effort delete.
                continue
            # Remove from cache
            self._cache.pop(name, None)
            return (True, f"Deleted preset '{name}'")

        return (False, f"Preset '{name}' not found")

    def increment_usage(self, name: str) -> Tuple[bool, str]:
        """
        Increment usage counter for a preset.

        Args:
            name: Preset name

        Returns:
            Tuple of (success: bool, message: str)
        """
        success, msg, preset = self.load_preset(name)

        if not success or preset is None:
            return (False, msg)

        # Update usage
        preset.usage_count += 1
        preset.last_used = datetime.now().isoformat()

        # Find and rewrite the backing file
        for filepath, data in self._iter_preset_data():
            if data.get("name") != name:
                continue
            data["usage_count"] = preset.usage_count
            data["last_used"] = preset.last_used
            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    json.dump(data, f, indent=2, ensure_ascii=False)
            except OSError:
                return (False, "Failed to update usage count")
            # Update cache
            self._cache[name] = preset
            return (True, f"Usage count: {preset.usage_count}")

        return (False, "Failed to update usage count")

    def export_preset(self, name: str, path: str) -> Tuple[bool, str]:
        """
        Export a preset to an external location for sharing.

        Args:
            name: Preset name to export
            path: Destination path

        Returns:
            Tuple of (success: bool, message: str)
        """
        success, msg, preset = self.load_preset(name)

        if not success or preset is None:
            return (False, msg)

        try:
            dest_path = Path(path)

            # Create directory if needed
            dest_path.parent.mkdir(parents=True, exist_ok=True)

            # Export as JSON
            with open(dest_path, 'w', encoding='utf-8') as f:
                json.dump(preset.to_dict(), f, indent=2, ensure_ascii=False)

            return (True, f"Exported to {dest_path}")
        except OSError as e:
            return (False, f"Export failed: {str(e)}")

    def import_preset(self, path: str, allow_overwrite: bool = False) -> Tuple[bool, str, Optional[Preset]]:
        """
        Import a preset from an external file.

        Args:
            path: Path to external preset JSON
            allow_overwrite: If True, overwrites existing preset with same name

        Returns:
            Tuple of (success: bool, message: str, preset: Optional[Preset])
        """
        try:
            source_path = Path(path)

            if not source_path.exists():
                return (False, f"File not found: {path}", None)

            # Load preset data
            with open(source_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            preset = Preset.from_dict(data)

            # Check for existing
            existing = self.load_preset(preset.name)
            if existing[0] and not allow_overwrite:
                return (False, f"Preset '{preset.name}' already exists (use allow_overwrite=True)", None)

            # Generate new filename
            filename = self._generate_filename(preset.metadata)
            dest_path = self.presets_dir / filename

            # Copy file
            shutil.copy2(source_path, dest_path)

            # Update cache
            self._cache[preset.name] = preset

            return (True, f"Imported preset '{preset.name}'", preset)

        except Exception as e:
            # Boundary: report any failure (I/O, bad JSON, malformed preset)
            # to the caller instead of raising.
            return (False, f"Import failed: {str(e)}", None)

    def get_preset_stats(self) -> Dict[str, Any]:
        """
        Get statistics about stored presets.

        Returns:
            Dictionary with statistics (counts, coherence range,
            genre/style histograms, most-used preset)
        """
        presets = self.list_presets(limit=10000)

        if not presets:
            return {
                "total_presets": 0,
                "avg_coherence": 0.0,
                "genres": {},
                "styles": {},
                "most_used": None
            }

        # Calculate stats
        coherence_scores = [p.metadata.coherence_score for p in presets]

        genres = {}
        styles = {}
        for p in presets:
            genres[p.metadata.genre] = genres.get(p.metadata.genre, 0) + 1
            styles[p.metadata.style] = styles.get(p.metadata.style, 0) + 1

        most_used = max(presets, key=lambda p: p.usage_count)

        return {
            "total_presets": len(presets),
            "avg_coherence": sum(coherence_scores) / len(coherence_scores),
            "min_coherence": min(coherence_scores),
            "max_coherence": max(coherence_scores),
            "genres": genres,
            "styles": styles,
            "most_used": {
                "name": most_used.name,
                "usage_count": most_used.usage_count
            } if most_used.usage_count > 0 else None
        }

    def clear_cache(self):
        """Clear the preset cache."""
        self._cache.clear()
        self._cache_timestamp = None
|
||||
|
||||
|
||||
# Convenience functions for direct usage
|
||||
def get_preset_manager() -> PresetManager:
    """Return a PresetManager using the default presets directory."""
    manager = PresetManager()
    return manager
|
||||
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
    # Demo: save an example preset, list what is stored, print stats.
    manager = PresetManager()

    # Example kit: roles mapped to a base sample plus optional variations.
    example_kit = {
        "kick": {
            "base": "/path/to/Kick_Pesado_01.wav",
            "variations": {
                "intro": "/path/to/Kick_Sutil_12.wav",
                "verse": "/path/to/Kick_Estampido_07.wav",
                "chorus": "/path/to/Kick_Agresivo_03.wav",
            },
        },
        "snare": {"base": "/path/to/Snare_Corte_01.wav", "variations": {}},
        "bass": {"base": "/path/to/Bass_Profundo_02.wav", "variations": {}},
    }

    # Example metadata describing the kit.
    metadata = {
        "genre": "reggaeton",
        "style": "perreo_intenso",
        "tempo": 95,
        "key": "Am",
        "variation_level": "high",
        "tags": ["heavy", "energetic"],
    }

    success, msg, preset = manager.save_preset(
        name=None,  # Auto-generate
        kit=example_kit,
        coherence_score=0.91,
        metadata=metadata,
    )
    print(f"Save: {success} - {msg}")

    presets = manager.list_presets(sort_by="coherence")
    print(f"\nFound {len(presets)} presets:")
    for p in presets:
        print(f"  - {p.name} ({p.metadata.coherence_score:.2f})")

    stats = manager.get_preset_stats()
    print(f"\nStats: {stats}")
|
||||
Reference in New Issue
Block a user