Cambios principales: ## Nuevos archivos - services/ai/parallel_provider.py: Ejecución paralela de múltiples proveedores AI - services/ai/prompt_manager.py: Gestión centralizada de prompts (resumen.md como fuente) - latex/resumen.md: Template del prompt para resúmenes académicos LaTeX ## Mejoras en generación LaTeX (document/generators.py) - Nueva función _sanitize_latex(): Corrige automáticamente errores comunes de AI - Agrega align=center a nodos TikZ con saltos de línea (\\) - Previene errores 'Not allowed in LR mode' antes de compilar - Soporte para procesamiento paralelo de proveedores AI - Conversión DOCX en paralelo con generación PDF - Uploads a Notion en background (non-blocking) - Callbacks de notificación para progreso en Telegram ## Mejoras en proveedores AI - claude_provider.py: fix_latex() con instrucciones específicas para errores TikZ - gemini_provider.py: fix_latex() mejorado + rate limiting + circuit breaker - provider_factory.py: Soporte para parallel provider ## Otros cambios - config/settings.py: Nuevas configuraciones para Gemini models - services/webdav_service.py: Mejoras en manejo de conexión - .gitignore: Ignora archivos LaTeX auxiliares (.aux, .toc, .out, .pdf) ## Archivos de ejemplo - latex/imperio_romano.tex, latex/clase_revolucion_rusa_crisis_30.tex - resumen_curiosidades.tex (corregido y compilado exitosamente)
81 lines
2.7 KiB
Python
"""
|
|
AI Provider Factory (Factory Pattern)
|
|
"""
|
|
|
|
import logging
|
|
from typing import Dict, Type, Optional
|
|
|
|
from core import AIProcessingError
|
|
from .base_provider import AIProvider
|
|
from .claude_provider import ClaudeProvider
|
|
from .gemini_provider import GeminiProvider
|
|
from .parallel_provider import ParallelAIProvider
|
|
|
|
|
|
class AIProviderFactory:
    """Factory for creating AI providers with fallback and parallel execution.

    Holds one instance of each known provider ("claude", "gemini") and hands
    out whichever is currently available. Can also wrap all available
    providers in a single ParallelAIProvider so they race on a request.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Registry of every known provider; availability is checked lazily
        # via is_available() so an unconfigured provider is simply skipped.
        self._providers: Dict[str, AIProvider] = {
            "claude": ClaudeProvider(),
            "gemini": GeminiProvider(),
        }
        # Lazily-created, cached parallel provider (see get_parallel_provider).
        self._parallel_provider: Optional[ParallelAIProvider] = None

    def get_provider(self, preferred: str = "gemini") -> AIProvider:
        """Return the preferred provider if available, else any available one.

        Args:
            preferred: Name of the provider to try first ("claude"/"gemini").
                Unknown names are not an error; the fallback scan runs instead.

        Returns:
            An available AIProvider instance.

        Raises:
            AIProcessingError: If no provider is currently available.
        """
        # Try the preferred provider first.
        if preferred in self._providers:
            provider = self._providers[preferred]
            if provider.is_available():
                # Lazy %-formatting: the string is only built if INFO is enabled.
                self.logger.info("Using %s provider", preferred)
                return provider

        # Fall back to the first available provider, in registration order.
        for name, provider in self._providers.items():
            if provider.is_available():
                self.logger.info("Falling back to %s provider", name)
                return provider

        raise AIProcessingError("No AI providers available")

    def get_all_available(self) -> Dict[str, AIProvider]:
        """Return a name -> provider mapping of all currently available providers."""
        return {
            name: provider
            for name, provider in self._providers.items()
            if provider.is_available()
        }

    def get_best_provider(self) -> AIProvider:
        """Return the best available provider (preference order: Claude > Gemini).

        Raises:
            AIProcessingError: If no provider is currently available.
        """
        return self.get_provider("claude")

    def get_parallel_provider(self, max_workers: int = 4) -> ParallelAIProvider:
        """Return a cached parallel provider racing all available providers.

        Args:
            max_workers: Thread-pool size for the parallel provider. NOTE:
                only honored on the first call; later calls return the cached
                instance and this argument is ignored. The provider set is
                likewise frozen at first call.

        Returns:
            The shared ParallelAIProvider instance.

        Raises:
            AIProcessingError: If no providers are available.
        """
        available = self.get_all_available()

        if not available:
            raise AIProcessingError("No providers available for parallel execution")

        if self._parallel_provider is None:
            self._parallel_provider = ParallelAIProvider(
                providers=available,
                max_workers=max_workers,
            )
            # Fix: the old message reported len(available) as "workers" --
            # these are providers; the worker-thread count is max_workers.
            self.logger.info(
                "Created parallel provider with %d providers: %s",
                len(available),
                ", ".join(available),
            )

        return self._parallel_provider

    def use_parallel(self) -> bool:
        """Return True when parallel execution makes sense (2+ providers available)."""
        return len(self.get_all_available()) > 1
|
# Module-level singleton: import this instead of constructing a new factory,
# so the cached parallel provider is shared across the whole application.
ai_provider_factory = AIProviderFactory()