Initial commit - cleaned for CV

Renato97
2026-03-31 01:28:25 -03:00
commit b1b3488c49
64 changed files with 13892 additions and 0 deletions

.dockerignore Normal file

@@ -0,0 +1,16 @@
.git
.gitignore
__pycache__/
*.pyc
*.pyo
*.pyd
*.pid
*.lock
*.log
.env
.venv/
downloads/
resumenes_docx/
logs/
resumen_clase.md
resumen_instituciones_gobierno.md

.env.example Normal file

@@ -0,0 +1,84 @@
# =============================================================================
# CBCFacil Configuration Template
# =============================================================================
# Copy this file to .env.secrets and fill in your actual values
# NEVER commit .env.secrets to version control
# =============================================================================
# =============================================================================
# Application Configuration
# =============================================================================
DEBUG=false
LOG_LEVEL=INFO
# =============================================================================
# Nextcloud/WebDAV Configuration (Required for file sync)
# =============================================================================
NEXTCLOUD_URL=https://your-nextcloud.example.com/remote.php/webdav
NEXTCLOUD_USER=your_username
NEXTCLOUD_PASSWORD=your_secure_password
# =============================================================================
# AI Providers Configuration (Required for summarization)
# =============================================================================
# Option 1: Claude/Anthropic
ANTHROPIC_AUTH_TOKEN=your_claude_api_token_here
ZAI_BASE_URL=https://api.z.ai/api/anthropic
# Option 2: Google Gemini
GEMINI_API_KEY=your_gemini_api_key_here
# =============================================================================
# CLI Tools (Optional - for local AI tools)
# =============================================================================
CLAUDE_CLI_PATH=/path/to/claude # or leave empty
GEMINI_CLI_PATH=/path/to/gemini # or leave empty
# =============================================================================
# Telegram Notifications (Optional)
# =============================================================================
TELEGRAM_TOKEN=your_telegram_bot_token
TELEGRAM_CHAT_ID=your_telegram_chat_id
# =============================================================================
# Notion Integration (Optional - for automatic PDF uploads)
# =============================================================================
# Get your token from: https://developers.notion.com/docs/create-a-notion-integration
NOTION_API=ntn_YOUR_NOTION_INTEGRATION_TOKEN_HERE
# Get your database ID from the database URL in Notion
NOTION_DATABASE_ID=your_database_id_here
# =============================================================================
# Dashboard Configuration (Required for production)
# =============================================================================
# Generate a secure key with: python -c "import os; print(os.urandom(24).hex())"
DASHBOARD_SECRET_KEY=generate_a_secure_random_key_here
DASHBOARD_HOST=0.0.0.0
DASHBOARD_PORT=5000
# =============================================================================
# GPU Configuration (Optional)
# =============================================================================
# Use specific GPU: CUDA_VISIBLE_DEVICES=0
# Use all GPUs: CUDA_VISIBLE_DEVICES=all
CUDA_VISIBLE_DEVICES=all
PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
# =============================================================================
# Performance Tuning (Optional)
# =============================================================================
OMP_NUM_THREADS=4
MKL_NUM_THREADS=4
# =============================================================================
# Processing Configuration (Optional)
# =============================================================================
POLL_INTERVAL=5
HTTP_TIMEOUT=30
WEBDAV_MAX_RETRIES=3
DOWNLOAD_CHUNK_SIZE=65536
# =============================================================================
# Logging (Optional)
# =============================================================================
LOG_FILE=logs/app.log
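
As a quick sanity check after copying this template, a minimal sketch (assuming python-dotenv, which the app itself uses to read these variables; `load_dotenv()` reads a local `.env`):

```python
# Sanity-check sketch: confirm required variables are set after copying
# the template (assumes python-dotenv; load_dotenv() reads a local .env).
import os

from dotenv import load_dotenv

load_dotenv()
required = ["NEXTCLOUD_URL", "NEXTCLOUD_USER", "NEXTCLOUD_PASSWORD"]
missing = [name for name in required if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing required settings: {', '.join(missing)}")
print("✅ Required settings present")
```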

.gitattributes vendored Normal file

@@ -0,0 +1,7 @@
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.m4a filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.blob filter=lfs diff=lfs merge=lfs -text
pdf_test/imperio5.pdf !text !filter !merge !diff
pdf_test/imperio5_ocr.pdf !text !filter !merge !diff
downloads/* !text !filter !merge !diff

.gitignore vendored Executable file

@@ -0,0 +1,87 @@
.env.secrets
.env.local
.env
# Python cache
__pycache__/
*.pyc
.venv/
# Application-generated data
downloads/
resumenes/
resumenes_docx/
processed_files.txt
*_unificado.docx
resumen_*.md
downloads/**/*.md
downloads/**/*.docx
resumenes_docx/**/*.docx
resumenes_docx/**/*.md
resumenes/**/*.md
resumenes/**/*.docx
# Node.js
.npm/
# Logs
logs/
*.log
# Test files
pdf_test/
cereal*.txt
test_*.py
docker-compose.test.yml
Dockerfile.test
requirements_summaries.txt
# Runtime state
.main_service.lock
cbc-main.pid
*.pid
*.db
# System files
.docker/buildx/
.dotnet/
.gemini/
.ssh/
.sudo_as_admin_successful
# IDE and editor files
.vscode/
.idea/
*.swp
*.swo
*~
# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
.aider*
# Temporary files from restoration
old/
imperio/
check_models.py
compare_configs.py
# LaTeX auxiliary files
*.aux
*.toc
*.out
*.synctex.gz
*.fls
*.fdb_latexmk
# Generated PDFs (keep source .tex files)
*.pdf
# macOS specific
mac/

README.md Normal file

@@ -0,0 +1,220 @@
# 🎵 CBCFacil v9
AI system for intelligent processing of documents (audio, PDF, text), with Nextcloud integration and an interactive web dashboard.
## ✨ Key Features
- 🎙️ **Audio Transcription** - Whisper with GPU/CPU support
- 📝 **Summary Generation** - Claude AI or Gemini
- 📄 **Multiple Formats** - Produces TXT, MD, DOCX, PDF
- ☁️ **Nextcloud Sync** - Automatic download and upload
- 🖥️ **Web Dashboard** - Monitoring and summary regeneration
- 📱 **Telegram Notifications** - Real-time alerts
- 🔄 **Reprocessing** - Regenerates summaries without re-transcribing
## 🏗️ Architecture
```
┌──────────────────────────────────────────────────────────────────────┐
│                             CBCFacil v9                              │
├──────────────────────────────────────────────────────────────────────┤
│                                                                      │
│  ┌─────────────┐     ┌─────────────┐     ┌─────────────┐             │
│  │  Nextcloud  │────▶│  Processor  │────▶│ AI Service  │             │
│  │  (WebDAV)   │     │Audio/PDF/TXT│     │Claude/Gemini│             │
│  └─────────────┘     └─────────────┘     └─────────────┘             │
│         │                   │                   │                    │
│         │                   ▼                   ▼                    │
│         │            ┌─────────────┐     ┌─────────────┐             │
│         │            │   Whisper   │     │  Document   │             │
│         │            │    (GPU)    │     │  Generator  │             │
│         │            └─────────────┘     └─────────────┘             │
│         │                   │                   │                    │
│         ▼                   ▼                   ▼                    │
│  ┌─────────────────────────────────────────────────────┐             │
│  │                Web Dashboard (Flask)                │             │
│  │  • File view             • Regenerate summaries     │             │
│  │  • Versions panel        • Preview                  │             │
│  └─────────────────────────────────────────────────────┘             │
│                             │                                        │
│                             ▼                                        │
│                      ┌─────────────┐                                 │
│                      │  Telegram   │                                 │
│                      │ (Notifier)  │                                 │
│                      └─────────────┘                                 │
│                                                                      │
└──────────────────────────────────────────────────────────────────────┘
```
## 📁 Project Structure
```
cbcfacil/
├── main.py                      # Main entry point
├── config/
│   └── settings.py              # Centralized configuration
├── services/
│   ├── webdav_service.py        # WebDAV/Nextcloud client
│   ├── vram_manager.py          # GPU memory management
│   ├── telegram_service.py      # Notifications
│   └── ai/
│       ├── claude_provider.py   # Claude provider (Z.ai)
│       ├── gemini_provider.py   # Gemini provider
│       └── provider_factory.py
├── processors/
│   ├── audio_processor.py       # Whisper transcription
│   ├── pdf_processor.py         # OCR and extraction
│   └── text_processor.py        # Classification
├── document/
│   └── generators.py            # Generates DOCX, PDF, Markdown
├── storage/
│   └── processed_registry.py    # Processed-files registry
├── api/
│   └── routes.py                # REST API + dashboard
├── templates/
│   └── index.html               # Dashboard UI
└── downloads/                   # Downloaded files
```
## 🚀 Installation
### Requirements
- Python 3.10+
- NVIDIA GPU + CUDA 12.1+ (optional; falls back to CPU)
- Nextcloud with WebDAV enabled
### Quick Install
```bash
# Clone the repository
git clone https://gitea.cbcren.online/renato97/cbcren2026.git
cd cbcren2026
# Create a virtual environment
python3 -m venv .venv
source .venv/bin/activate
# Install dependencies
pip install -r requirements.txt
# Configure
cp .env.example .env
nano .env  # edit with your credentials
# Run
python3 main.py
```
## ⚙️ Configuration
### Environment Variables (.env)
```bash
# === NEXTCLOUD/WEBDAV ===
NEXTCLOUD_URL=https://your-nextcloud.com/remote.php/webdav
NEXTCLOUD_USER=username
NEXTCLOUD_PASSWORD=password
# === AI PROVIDERS ===
GEMINI_API_KEY=AIza...            # for summaries with Gemini
# or
ANTHROPIC_AUTH_TOKEN=sk-ant-...   # for summaries with Claude
# === TELEGRAM (optional) ===
TELEGRAM_TOKEN=bot_token
TELEGRAM_CHAT_ID=chat_id
# === DASHBOARD ===
DASHBOARD_HOST=0.0.0.0
DASHBOARD_PORT=5000
```
## 🖥️ Web Dashboard
The dashboard runs at `http://localhost:5000`, alongside the main service.
### Features
| Feature | Description |
|---------|-------------|
| 📊 **File View** | Lists all audio files with their status |
| 🔍 **Search & Filters** | Filter by local/WebDAV, sort by date/name |
| 👁️ **Preview Panel** | View transcriptions and summaries |
| 📁 **Versions Tab** | Lists all generated formats (TXT, MD, DOCX, PDF) |
| ✨ **Regenerate Summary** | Generates a new AI version of the summary |
| 🔄 **Reset Status** | Marks a file as unprocessed |
### REST API Endpoints
```
GET  /api/files                 # List files
GET  /api/files-detailed        # List with transcription info
GET  /api/transcription/<f>     # Get a transcription
GET  /api/summary/<f>           # Get a summary
GET  /api/versions/<f>          # List generated versions
POST /api/regenerate-summary    # Regenerate a summary from its transcription
POST /api/mark-unprocessed      # Reset a file's status
GET  /health                    # Service health
```
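For scripting against the dashboard, a minimal client sketch with `requests` (the host/port assume the default dashboard settings, and the filename is illustrative):

```python
# Minimal API client sketch (default host/port; the filename is illustrative).
import requests

BASE = "http://localhost:5000"

files = requests.get(f"{BASE}/api/files", timeout=30).json()
print(f"{files['total']} files, {files['pending']} pending")

resp = requests.post(
    f"{BASE}/api/regenerate-summary",
    json={"filename": "clase_01.mp3"},  # illustrative filename
    timeout=600,  # summary generation can take a while
)
print(resp.json().get("message"))
```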
## 🔄 Processing Flow
1. **Detection** - The service polls Nextcloud every 5 seconds
2. **Download** - New files are downloaded locally
3. **Transcription** - Whisper converts audio to text (.txt)
4. **Summary** - Claude/Gemini generates a structured summary
5. **Documents** - .md, .docx, and .pdf files are generated
6. **Upload** - The documents are uploaded to Nextcloud
7. **Notification** - Telegram reports completion (the full loop is sketched below)
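A minimal sketch of that loop, with hypothetical helper names standing in for the real processors and services:

```python
# Sketch of the polling loop; the helper names are hypothetical stand-ins
# for webdav_service, the processors, and the document generator.
import time

POLL_INTERVAL = 5  # seconds, matching the POLL_INTERVAL setting

def run_loop():
    while True:
        for remote_path in list_new_files():         # 1. detection
            local_path = download(remote_path)       # 2. download
            text = transcribe(local_path)            # 3. transcription (Whisper)
            summary = summarize(text)                # 4. summary (Claude/Gemini)
            documents = generate_documents(summary)  # 5. .md / .docx / .pdf
            upload_all(documents)                    # 6. upload
            notify_telegram(remote_path)             # 7. notification
        time.sleep(POLL_INTERVAL)
```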
## 📱 Regenerating Summaries
When a summary is not satisfactory, you can regenerate it:
### From the Dashboard
1. Click the processed file
2. A side panel opens with the transcription/summary
3. Click "✨ Regenerate" or the "📁 Versions" tab
4. A new version is generated and replaces the previous one
### From the List
- Processed files show a "✨ Regenerate" button
- Click it directly, without opening the side panel
## 🛠️ CLI Usage
```bash
# Run the full service
python3 main.py
# Transcribe a specific audio file
python3 main.py whisper audio.mp3 ./output/
# Process a specific PDF
python3 main.py pdf document.pdf ./output/
```
## 📊 Metrics
| Component | Performance |
|-----------|-------------|
| Whisper transcription | ~1x audio duration (GPU) |
| Gemini summary | ~5-15s per document |
| PDF OCR | ~2-5s per page |
| Service startup | ~5-10s |
## 🔧 Technologies
- **Backend**: Python 3.10+, Flask
- **IA**: OpenAI Whisper, Google Gemini, Anthropic Claude
- **Frontend**: HTML5, CSS3, JavaScript (Vanilla)
- **Storage**: Nextcloud (WebDAV)
- **GPU**: CUDA, PyTorch
## 📝 License
MIT License
---
**Developed by CBC** | Last updated: January 2026

amd/rocm_stress_test.py Executable file

@@ -0,0 +1,255 @@
#!/usr/bin/env python3
"""
🔥 ROCm Stress Test - Endurance test for AMD GPUs
Runs intensive operations for 2 minutes and monitors metrics
"""
import torch
import time
import sys
from datetime import datetime
def print_header():
    print("=" * 70)
    print("🔥 ROCm STRESS TEST - AMD GPU ENDURANCE TEST")
    print("=" * 70)
    print(f"Start: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"Total VRAM: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
    else:
        print("GPU: N/A (CUDA/ROCm not available)")
    print("=" * 70)
    print()
def get_gpu_stats():
"""Obtener estadísticas actuales de la GPU"""
if not torch.cuda.is_available():
return None
props = torch.cuda.get_device_properties(0)
mem_allocated = torch.cuda.memory_allocated(0) / 1024**3
mem_reserved = torch.cuda.memory_reserved(0) / 1024**3
mem_total = props.total_memory / 1024**3
return {
'mem_allocated': mem_allocated,
'mem_reserved': mem_reserved,
'mem_total': mem_total,
'mem_percent': (mem_allocated / mem_total) * 100
}
def stress_test(duration_seconds=120):
"""Ejecutar stress test durante duración especificada"""
print(f"🧪 Iniciando stress test por {duration_seconds} segundos...")
print(f" Presiona Ctrl+C para detener en cualquier momento\n")
if not torch.cuda.is_available():
print("❌ ERROR: CUDA/ROCm no está disponible!")
sys.exit(1)
device = torch.device("cuda")
torch.cuda.set_per_process_memory_fraction(0.85, 0) # Usar 85% de VRAM
# Inicializar
results = {
'matmul_times': [],
'conv_times': [],
'reLU_times': [],
'softmax_times': [],
'iterations': 0,
'errors': 0
}
start_time = time.time()
iteration = 0
last_print = 0
try:
while time.time() - start_time < duration_seconds:
iteration += 1
results['iterations'] = iteration
try:
                # Intensive ML operations
                # 1. Matrix multiplication (size varies per iteration)
                if iteration % 5 == 0:
                    size = 8192  # occasionally use a large matrix
elif iteration % 3 == 0:
size = 4096
else:
size = 2048
a = torch.randn(size, size, device=device, dtype=torch.float16)
b = torch.randn(size, size, device=device, dtype=torch.float16)
torch.cuda.synchronize()
t0 = time.time()
c = torch.matmul(a, b)
torch.cuda.synchronize()
matmul_time = time.time() - t0
results['matmul_times'].append(matmul_time)
del a, b, c
                # 2. 3D convolution
x = torch.randn(32, 128, 64, 64, 64, device=device)
conv = torch.nn.Conv3d(128, 256, kernel_size=3, padding=1).to(device)
torch.cuda.synchronize()
t0 = time.time()
out = conv(x)
torch.cuda.synchronize()
conv_time = time.time() - t0
results['conv_times'].append(conv_time)
# 3. ReLU + BatchNorm
bn = torch.nn.BatchNorm2d(256).to(device)
torch.cuda.synchronize()
t0 = time.time()
out = bn(torch.relu(out))
torch.cuda.synchronize()
relu_time = time.time() - t0
results['reLU_times'].append(relu_time)
del x, out
                # 4. Large softmax
x = torch.randn(2048, 2048, device=device)
torch.cuda.synchronize()
t0 = time.time()
softmax_out = torch.softmax(x, dim=-1)
torch.cuda.synchronize()
softmax_time = time.time() - t0
results['softmax_times'].append(softmax_time)
del softmax_out
except Exception as e:
results['errors'] += 1
                if results['errors'] < 5:  # only show the first few errors
                    print(f"\n⚠️ Error at iteration {iteration}: {str(e)[:50]}")
            # Progress update once per second
elapsed = time.time() - start_time
if elapsed - last_print >= 1.0 or elapsed >= duration_seconds:
last_print = elapsed
progress = (elapsed / duration_seconds) * 100
# Stats
stats = get_gpu_stats()
matmul_avg = sum(results['matmul_times'][-10:]) / len(results['matmul_times'][-10:]) if results['matmul_times'] else 0
print(f"\r⏱️ Iteración {iteration:4d} | "
f"Tiempo: {elapsed:6.1f}s/{duration_seconds}s [{progress:5.1f}%] | "
f"VRAM: {stats['mem_allocated']:5.2f}GB/{stats['mem_total']:.2f}GB ({stats['mem_percent']:5.1f}%) | "
f"MatMul avg: {matmul_avg*1000:6.2f}ms | "
f"Iter/s: {iteration/elapsed:5.2f}",
end='', flush=True)
# Pequeña pausa para evitar sobrecarga
time.sleep(0.05)
except KeyboardInterrupt:
print("\n\n⏹️ Interrumpido por el usuario")
elapsed = time.time() - start_time
print(f" Duración real: {elapsed:.1f} segundos")
print(f" Iteraciones: {iteration}")
print("\n")
return results
def print_summary(results, duration):
"""Imprimir resumen de resultados"""
print("\n" + "=" * 70)
print("📊 RESUMEN DEL STRESS TEST")
print("=" * 70)
print(f"Duración total: {duration:.2f} segundos")
print(f"Iteraciones completadas: {results['iterations']}")
print(f"Errores: {results['errors']}")
print()
if results['matmul_times']:
matmul_avg = sum(results['matmul_times']) / len(results['matmul_times'])
matmul_min = min(results['matmul_times'])
matmul_max = max(results['matmul_times'])
matmul_last10_avg = sum(results['matmul_times'][-10:]) / len(results['matmul_times'][-10:])
print(f"🔢 MATRIZ MULTIPLICACIÓN (2048-8192)")
print(f" Promedio: {matmul_avg*1000:8.2f} ms")
print(f" Últimas 10: {matmul_last10_avg*1000:8.2f} ms")
print(f" Mínimo: {matmul_min*1000:8.2f} ms")
print(f" Máximo: {matmul_max*1000:8.2f} ms")
print()
if results['conv_times']:
conv_avg = sum(results['conv_times']) / len(results['conv_times'])
conv_min = min(results['conv_times'])
conv_max = max(results['conv_times'])
print(f"🧮 CONVOLUCIÓN 3D (32x128x64³)")
print(f" Promedio: {conv_avg*1000:8.2f} ms")
print(f" Mínimo: {conv_min*1000:8.2f} ms")
print(f" Máximo: {conv_max*1000:8.2f} ms")
print()
if results['reLU_times']:
relu_avg = sum(results['reLU_times']) / len(results['reLU_times'])
relu_min = min(results['reLU_times'])
relu_max = max(results['reLU_times'])
print(f"⚡ ReLU + BatchNorm")
print(f" Promedio: {relu_avg*1000:8.4f} ms")
print(f" Mínimo: {relu_min*1000:8.4f} ms")
print(f" Máximo: {relu_max*1000:8.4f} ms")
print()
if results['softmax_times']:
softmax_avg = sum(results['softmax_times']) / len(results['softmax_times'])
softmax_min = min(results['softmax_times'])
softmax_max = max(results['softmax_times'])
print(f"🔥 Softmax (2048x2048)")
print(f" Promedio: {softmax_avg*1000:8.2f} ms")
print(f" Mínimo: {softmax_min*1000:8.2f} ms")
print(f" Máximo: {softmax_max*1000:8.2f} ms")
print()
# Performance score
total_ops = results['iterations']
if total_ops > 0 and duration > 0:
print("=" * 70)
print(f"✅ TEST COMPLETADO")
print(f" 📈 {total_ops} operaciones en {duration:.1f} segundos")
print(f"{total_ops/duration:.2f} operaciones/segundo")
print(f" 💾 Uso de VRAM: Hasta ~85% (configurado)")
print("=" * 70)
print()
    # Approximate GFLOPS for matmul
    if results['matmul_times']:
        # GFLOPS = 2 * n^3 / (time * 10^9) for an n x n matrix
        avg_matmul_ms = (sum(results['matmul_times']) / len(results['matmul_times'])) * 1000
        # Approximate average size, mirroring the schedule in stress_test:
        # every 5th iteration 8192, every 3rd 4096, otherwise 2048
        avg_n = sum([8192 if i % 5 == 0 else 4096 if i % 3 == 0 else 2048
                     for i in range(len(results['matmul_times']))]) / len(results['matmul_times'])
        gflops = (2 * (avg_n**3)) / (avg_matmul_ms / 1000) / 1e9
        print("🚀 ESTIMATED PERFORMANCE")
        print(f"   ~{gflops:.2f} GFLOPS (matrix multiplication)")
def main():
print_header()
    # Verify ROCm availability
    if not torch.cuda.is_available():
        print("❌ ERROR: ROCm/CUDA is not available!")
        print("   Run: export HSA_OVERRIDE_GFX_VERSION=10.3.0")
        sys.exit(1)
    # Run the stress test
    duration = 120  # 2 minutes
start = time.time()
results = stress_test(duration)
actual_duration = time.time() - start
    # Show the summary
    print_summary(results, actual_duration)
    # Clean up
    torch.cuda.empty_cache()
    print("🧹 GPU cache cleared")
    print("\n✅ Stress test finished")
if __name__ == "__main__":
main()

api/__init__.py Normal file

@@ -0,0 +1,7 @@
"""
API package for CBCFacil
"""
from .routes import create_app
__all__ = ['create_app']

api/routes.py Normal file

@@ -0,0 +1,746 @@
"""
Flask API routes for CBCFacil dashboard
"""
import os
import time
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, List
from flask import Flask, render_template, request, jsonify, send_from_directory
from flask_cors import CORS
from config import settings
from storage.processed_registry import processed_registry
from services.webdav_service import webdav_service
from services import vram_manager
from document.generators import DocumentGenerator
def create_app() -> Flask:
"""Create and configure Flask application"""
# Get the project root directory (parent of api/)
current_dir = Path(__file__).parent
project_root = current_dir.parent
template_dir = project_root / 'templates'
app = Flask(__name__, template_folder=str(template_dir))
CORS(app)
# Configure app
app.config['SECRET_KEY'] = settings.DASHBOARD_SECRET_KEY or os.urandom(24)
app.config['DOWNLOADS_FOLDER'] = str(settings.LOCAL_DOWNLOADS_PATH)
@app.route('/')
def index():
"""Dashboard home page"""
return render_template('index.html')
@app.route('/api/files')
def get_files():
"""Get list of audio files"""
try:
files = get_audio_files()
return jsonify({
'success': True,
'files': files,
'total': len(files),
'processed': sum(1 for f in files if f['processed']),
'pending': sum(1 for f in files if not f['processed'])
})
except Exception as e:
app.logger.error(f"Error getting files: {e}")
return jsonify({
'success': False,
'message': f"Error: {str(e)}"
}), 500
@app.route('/api/reprocess', methods=['POST'])
def reprocess_file():
"""Reprocess a file"""
try:
data = request.get_json()
file_path = data.get('path')
source = data.get('source', 'local')
if not file_path:
return jsonify({
'success': False,
'message': "Path del archivo es requerido"
}), 400
# TODO: Implement file reprocessing
# This would trigger the main processing loop
return jsonify({
'success': True,
'message': f"Archivo {Path(file_path).name} enviado a reprocesamiento"
})
except Exception as e:
app.logger.error(f"Error reprocessing file: {e}")
return jsonify({
'success': False,
'message': f"Error: {str(e)}"
}), 500
@app.route('/api/mark-unprocessed', methods=['POST'])
def mark_unprocessed():
"""Mark file as unprocessed"""
try:
data = request.get_json()
file_path = data.get('path')
if not file_path:
return jsonify({
'success': False,
'message': "Path del archivo es requerido"
}), 400
success = processed_registry.remove(file_path)
if success:
return jsonify({
'success': True,
'message': "Archivo marcado como no procesado"
})
else:
return jsonify({
'success': False,
'message': "No se pudo marcar como no procesado"
}), 500
except Exception as e:
app.logger.error(f"Error marking unprocessed: {e}")
return jsonify({
'success': False,
'message': f"Error: {str(e)}"
}), 500
@app.route('/api/refresh')
def refresh_files():
"""Refresh file list"""
try:
processed_registry.load()
files = get_audio_files()
return jsonify({
'success': True,
'message': "Lista de archivos actualizada",
'files': files
})
except Exception as e:
app.logger.error(f"Error refreshing files: {e}")
return jsonify({
'success': False,
'message': f"Error: {str(e)}"
}), 500
@app.route('/downloads/<path:filename>')
def download_file(filename):
"""Download file"""
try:
            # Validate the path to prevent traversal attacks: resolve each
            # candidate against its base directory and require containment
            if '..' in filename or filename.startswith('/'):
                return jsonify({'error': 'Invalid filename'}), 400
            base_downloads = Path(str(settings.LOCAL_DOWNLOADS_PATH)).resolve()
            base_docx = Path(str(settings.LOCAL_DOCX)).resolve()
            in_downloads = (base_downloads / filename).resolve().is_relative_to(base_downloads)
            in_docx = (base_docx / filename).resolve().is_relative_to(base_docx)
            if not (in_downloads or in_docx):
                return jsonify({'error': 'Invalid filename'}), 400
# Try downloads directory
downloads_path = settings.LOCAL_DOWNLOADS_PATH / filename
if downloads_path.exists():
return send_from_directory(str(settings.LOCAL_DOWNLOADS_PATH), filename)
# Try resumenes_docx directory
docx_path = settings.LOCAL_DOCX / filename
if docx_path.exists():
return send_from_directory(str(settings.LOCAL_DOCX), filename)
return jsonify({'error': 'File not found'}), 404
except Exception as e:
app.logger.error(f"Error downloading file: {e}")
return jsonify({'error': 'File not found'}), 404
@app.route('/downloads/find-file')
def find_and_download_file():
"""Find and download file with various name variants"""
try:
filename = request.args.get('filename', '')
ext = request.args.get('ext', 'txt')
if not filename:
return jsonify({'error': 'Filename required'}), 400
# Validate to prevent path traversal
if '..' in filename or filename.startswith('/'):
return jsonify({'error': 'Invalid filename'}), 400
# Try various name variants
base_name = filename.replace('_unificado', '').replace('_unified', '')
name_variants = [
f"{base_name}.{ext}",
f"{base_name}_unificado.{ext}",
f"{base_name}_unified.{ext}",
f"{base_name.replace(' ', '_')}.{ext}",
f"{base_name.replace(' ', '_')}_unificado.{ext}",
]
# Directories to search
directories = [
settings.LOCAL_DOWNLOADS_PATH,
settings.LOCAL_DOCX
]
# Search for file
for directory in directories:
if not directory.exists():
continue
for variant in name_variants:
file_path = directory / variant
if file_path.exists():
                        # Determine the MIME type so the file opens in the browser
mimetype = None
if ext == 'pdf':
mimetype = 'application/pdf'
elif ext == 'md':
mimetype = 'text/markdown'
elif ext == 'txt':
mimetype = 'text/plain'
                        # as_attachment=False so the browser opens the file instead of downloading it
return send_from_directory(str(directory), variant, as_attachment=False, mimetype=mimetype)
return jsonify({'error': f'File not found: {filename}.{ext}'}), 404
except Exception as e:
app.logger.error(f"Error finding file: {e}")
return jsonify({'error': 'File not found'}), 404
@app.route('/health')
def health_check():
"""Health check endpoint"""
gpu_info = vram_manager.get_usage()
return jsonify({
'status': 'healthy',
'timestamp': datetime.now().isoformat(),
'processed_files_count': processed_registry.count(),
'gpu': gpu_info,
'config': {
'webdav_configured': settings.has_webdav_config,
'ai_configured': settings.has_ai_config,
'debug': settings.DEBUG
}
})
@app.route('/api/transcription/<filename>')
def get_transcription(filename: str):
"""Get transcription content for a specific file"""
try:
# Validate filename to prevent path traversal
if '..' in filename or filename.startswith('/'):
return jsonify({
'success': False,
'message': 'Invalid filename'
}), 400
# Extract base name without extension (handle .mp3, .wav, .txt, etc.)
base_name = Path(filename).stem
# Construct file path for transcription
file_path = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.txt"
# Check if file exists
if not file_path.exists():
return jsonify({
'success': False,
'message': f'Transcription file not found: {base_name}.txt'
}), 404
# Read file content
with open(file_path, 'r', encoding='utf-8') as f:
transcription_text = f.read()
# Calculate statistics
word_count = len(transcription_text.split())
char_count = len(transcription_text)
return jsonify({
'success': True,
'filename': filename,
'transcription': transcription_text,
'file_path': str(file_path),
'word_count': word_count,
'char_count': char_count
})
except Exception as e:
app.logger.error(f"Error reading transcription: {e}")
return jsonify({
'success': False,
'message': f"Error reading transcription: {str(e)}"
}), 500
@app.route('/api/summary/<filename>')
def get_summary(filename: str):
"""Get summary content for a specific file"""
try:
# Validate filename to prevent path traversal
if '..' in filename or filename.startswith('/'):
return jsonify({
'success': False,
'message': 'Invalid filename'
}), 400
# Extract base name without extension (handle .mp3, .wav, etc.)
base_name = Path(filename).stem
# Also remove _unificado/_unified suffixes if present
base_name = base_name.replace('_unificado', '').replace('_unified', '')
# Try different file path variants
possible_paths = [
settings.LOCAL_DOWNLOADS_PATH / f"{base_name}_unificado.md",
settings.LOCAL_DOWNLOADS_PATH / f"{base_name}_unified.md",
settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.md",
]
file_path = None
for path in possible_paths:
if path.exists():
file_path = path
break
if not file_path:
return jsonify({
'success': False,
'message': f'Summary file not found for: {filename}'
}), 404
# Read file content
with open(file_path, 'r', encoding='utf-8') as f:
summary_text = f.read()
# Get available formats
formats_available = get_available_formats(base_name)
return jsonify({
'success': True,
'filename': base_name,
'summary': summary_text,
'file_path': str(file_path),
'formats_available': formats_available
})
except Exception as e:
app.logger.error(f"Error reading summary: {e}")
return jsonify({
'success': False,
'message': f"Error reading summary: {str(e)}"
}), 500
@app.route('/api/versions/<filename>')
def get_versions(filename: str):
"""Get all summary versions for a file"""
try:
# Validate filename
if '..' in filename or filename.startswith('/'):
return jsonify({'success': False, 'message': 'Invalid filename'}), 400
# Extract base name
base_name = Path(filename).stem
base_name = base_name.replace('_unificado', '').replace('_unified', '')
versions = []
downloads_path = settings.LOCAL_DOWNLOADS_PATH
docx_path = settings.LOCAL_DOCX
# Check for transcription (original)
txt_path = downloads_path / f"{base_name}.txt"
if txt_path.exists():
stat = txt_path.stat()
versions.append({
'type': 'transcription',
'label': '📝 Transcripción Original',
'filename': txt_path.name,
'path': f"/downloads/find-file?filename={base_name}&ext=txt",
'date': datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M'),
'size': f"{stat.st_size / 1024:.1f} KB"
})
# Check for summary versions (md, docx, pdf)
summary_patterns = [
(f"{base_name}_unificado.md", "📋 Resumen MD"),
(f"{base_name}_unificado.docx", "📄 Documento DOCX"),
(f"{base_name}_unificado.pdf", "📑 PDF"),
]
for pattern, label in summary_patterns:
# Check downloads path
file_path = downloads_path / pattern
if not file_path.exists():
file_path = docx_path / pattern
if file_path.exists():
stat = file_path.stat()
ext = file_path.suffix[1:] # Remove the dot
versions.append({
'type': 'summary',
'label': label,
'filename': pattern,
'path': f"/downloads/find-file?filename={base_name}&ext={ext}",
'date': datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M'),
'size': f"{stat.st_size / 1024:.1f} KB"
})
# Sort by date descending
versions.sort(key=lambda x: x['date'], reverse=True)
return jsonify({
'success': True,
'base_name': base_name,
'versions': versions,
'count': len(versions)
})
except Exception as e:
app.logger.error(f"Error getting versions: {e}")
return jsonify({'success': False, 'message': str(e)}), 500
@app.route('/api/regenerate-summary', methods=['POST'])
def regenerate_summary():
"""Regenerate summary from existing transcription"""
start_time = time.time()
try:
data = request.get_json()
filename = data.get('filename')
custom_prompt = data.get('custom_prompt')
if not filename:
return jsonify({
'success': False,
'message': 'Filename is required'
}), 400
# Validate filename to prevent path traversal
if '..' in filename or filename.startswith('/'):
return jsonify({
'success': False,
'message': 'Invalid filename'
}), 400
# Get base name (remove extension if present)
base_name = Path(filename).stem
# Read transcription from .txt file
transcription_path = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.txt"
if not transcription_path.exists():
# Try without .txt extension if already included
transcription_path = settings.LOCAL_DOWNLOADS_PATH / filename
if not transcription_path.exists():
return jsonify({
'success': False,
'message': f'Transcription file not found for: {filename}'
}), 404
with open(transcription_path, 'r', encoding='utf-8') as f:
transcription_text = f.read()
# Generate new summary using DocumentGenerator
doc_generator = DocumentGenerator()
success, new_summary, metadata = doc_generator.generate_summary(
transcription_text,
base_name
)
if not success:
return jsonify({
'success': False,
'message': 'Failed to generate summary'
}), 500
# Upload to WebDAV if configured
files_updated = []
if settings.has_webdav_config:
try:
# Upload markdown
if 'markdown_path' in metadata:
md_path = Path(metadata['markdown_path'])
if md_path.exists():
remote_md_path = f"{settings.REMOTE_TXT_FOLDER}/{md_path.name}"
webdav_service.upload(str(md_path), remote_md_path)
files_updated.append(remote_md_path)
# Upload DOCX
if 'docx_path' in metadata:
docx_path = Path(metadata['docx_path'])
if docx_path.exists():
remote_docx_path = f"{settings.DOCX_FOLDER}/{docx_path.name}"
webdav_service.upload(str(docx_path), remote_docx_path)
files_updated.append(remote_docx_path)
# Upload PDF if available
if 'pdf_path' in metadata:
pdf_path = Path(metadata['pdf_path'])
if pdf_path.exists():
remote_pdf_path = f"{settings.REMOTE_PDF_FOLDER}/{pdf_path.name}"
webdav_service.upload(str(pdf_path), remote_pdf_path)
files_updated.append(remote_pdf_path)
except Exception as e:
app.logger.warning(f"WebDAV upload failed: {e}")
# Continue even if upload fails
processing_time = time.time() - start_time
return jsonify({
'success': True,
'message': 'Summary regenerated successfully',
'new_summary': new_summary,
'files_updated': files_updated,
'processing_time': f"{processing_time:.2f}s",
'metadata': metadata
})
except Exception as e:
app.logger.error(f"Error regenerating summary: {e}")
return jsonify({
'success': False,
'message': f"Error regenerating summary: {str(e)}"
}), 500
@app.route('/api/files-detailed')
def get_files_detailed():
"""Get detailed list of files with transcription and summary info"""
try:
files = get_audio_files_detailed()
# Calculate statistics
total = len(files)
with_transcription = sum(1 for f in files if f['has_transcription'])
with_summary = sum(1 for f in files if f['has_summary'])
return jsonify({
'success': True,
'files': files,
'total': total,
'with_transcription': with_transcription,
'with_summary': with_summary
})
except Exception as e:
app.logger.error(f"Error getting detailed files: {e}")
return jsonify({
'success': False,
'message': f"Error: {str(e)}"
}), 500
return app
def get_audio_files() -> List[Dict[str, Any]]:
"""Get list of audio files from WebDAV and local"""
import logging
logger = logging.getLogger(__name__)
files = []
# Get files from WebDAV
if settings.has_webdav_config:
try:
webdav_files = webdav_service.list(settings.REMOTE_AUDIOS_FOLDER)
for file_path in webdav_files:
normalized_path = webdav_service.normalize_path(file_path)
base_name = Path(normalized_path).name
if any(normalized_path.lower().endswith(ext) for ext in settings.AUDIO_EXTENSIONS):
is_processed = processed_registry.is_processed(normalized_path)
files.append({
'filename': base_name,
'path': normalized_path,
'source': 'webdav',
'processed': is_processed,
'size': 'Unknown',
'last_modified': 'Unknown',
'available_formats': get_available_formats(base_name)
})
except Exception as e:
logger.warning(f"Error getting WebDAV files: {e}")
# Get local files
try:
if settings.LOCAL_DOWNLOADS_PATH.exists():
for ext in settings.AUDIO_EXTENSIONS:
for file_path in settings.LOCAL_DOWNLOADS_PATH.glob(f"*{ext}"):
stat = file_path.stat()
is_processed = processed_registry.is_processed(file_path.name)
files.append({
'filename': file_path.name,
'path': str(file_path),
'source': 'local',
'processed': is_processed,
'size': format_size(stat.st_size),
'last_modified': datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M:%S'),
'available_formats': get_available_formats(file_path.name)
})
except Exception as e:
logger.error(f"Error getting local files: {e}")
# Remove duplicates (keep both local and webdav - distinguish by source)
unique_files = {}
for file in files:
# Use (filename, source) as key to keep both local and webdav files
key = (file['filename'], file['source'])
unique_files[key] = file
return sorted(unique_files.values(), key=lambda x: (x['source'], x['filename']))
def get_audio_files_detailed() -> List[Dict[str, Any]]:
"""Get detailed list of audio files with transcription and summary information"""
files = []
# Get local audio files only for detailed view
try:
if settings.LOCAL_DOWNLOADS_PATH.exists():
for ext in settings.AUDIO_EXTENSIONS:
for file_path in settings.LOCAL_DOWNLOADS_PATH.glob(f"*{ext}"):
stat = file_path.stat()
filename = file_path.name
base_name = file_path.stem
# Check for transcription
transcription_path = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.txt"
has_transcription = transcription_path.exists()
transcription_words = 0
if has_transcription:
try:
with open(transcription_path, 'r', encoding='utf-8') as f:
transcription_text = f.read()
transcription_words = len(transcription_text.split())
except Exception:
pass
# Check for summary and formats
formats = get_available_formats(filename)
has_summary = formats.get('md', False)
# Get summary path
summary_path = None
if has_summary:
summary_variants = [
settings.LOCAL_DOWNLOADS_PATH / f"{base_name}_unificado.md",
settings.LOCAL_DOWNLOADS_PATH / f"{base_name}_unified.md",
settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.md",
]
for variant in summary_variants:
if variant.exists():
summary_path = str(variant)
break
files.append({
'filename': filename,
'base_name': base_name,
'audio_path': str(file_path),
'has_transcription': has_transcription,
'transcription_path': str(transcription_path) if has_transcription else None,
'transcription_words': transcription_words,
'has_summary': has_summary,
'summary_path': summary_path,
'formats': formats,
'processed': processed_registry.is_processed(filename),
'last_modified': datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M:%S'),
'size': format_size(stat.st_size)
})
    except Exception:
        pass  # errors are reported by the calling endpoint
# Get WebDAV files
if settings.has_webdav_config:
try:
webdav_files = webdav_service.list(settings.REMOTE_AUDIOS_FOLDER)
for file_path in webdav_files:
normalized_path = webdav_service.normalize_path(file_path)
base_name = Path(normalized_path).stem
if any(normalized_path.lower().endswith(ext) for ext in settings.AUDIO_EXTENSIONS):
# Check if already in local files
if not any(f['base_name'] == base_name for f in files):
formats = get_available_formats(base_name)
files.append({
'filename': Path(normalized_path).name,
'base_name': base_name,
'audio_path': normalized_path,
'has_transcription': formats.get('txt', False),
'transcription_path': None,
'transcription_words': 0,
'has_summary': formats.get('md', False),
'summary_path': None,
'formats': formats,
'processed': processed_registry.is_processed(normalized_path),
'last_modified': 'Unknown',
'size': 'Unknown'
})
        except Exception:
            pass  # errors are reported by the calling endpoint
# Remove duplicates and sort
unique_files = {}
for file in files:
key = file['base_name']
if key not in unique_files:
unique_files[key] = file
return sorted(unique_files.values(), key=lambda x: x['filename'])
def get_available_formats(audio_filename: str) -> Dict[str, bool]:
"""Check which output formats are available for an audio file"""
base_name = Path(audio_filename).stem
formats = {
'txt': False,
'md': False,
'pdf': False,
'docx': False
}
directories_to_check = [
settings.LOCAL_DOWNLOADS_PATH,
settings.LOCAL_DOCX
]
for directory in directories_to_check:
if not directory.exists():
continue
for ext in formats.keys():
name_variants = [
base_name,
f"{base_name}_unificado",
base_name.replace(' ', '_'),
f"{base_name.replace(' ', '_')}_unificado",
]
for name_variant in name_variants:
file_path = directory / f"{name_variant}.{ext}"
if file_path.exists():
formats[ext] = True
break
return formats
def format_size(size_bytes: int) -> str:
"""Format size in human-readable format"""
for unit in ['B', 'KB', 'MB', 'GB']:
if size_bytes < 1024.0:
return f"{size_bytes:.1f} {unit}"
size_bytes /= 1024.0
return f"{size_bytes:.1f} TB"


@@ -0,0 +1,69 @@
"""
AI Service - Unified interface for AI providers
"""
import logging
from typing import Optional, Dict, Any
from .core import AIProcessingError
from .services.ai.provider_factory import AIProviderFactory, ai_provider_factory
class AIService:
"""Unified service for AI operations with provider fallback"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._factory: Optional[AIProviderFactory] = None
@property
def factory(self) -> AIProviderFactory:
"""Lazy initialization of provider factory"""
if self._factory is None:
self._factory = ai_provider_factory
return self._factory
def generate_text(
self,
prompt: str,
provider: Optional[str] = None,
max_tokens: int = 4096
) -> str:
"""Generate text using AI provider"""
try:
ai_provider = self.factory.get_provider(provider or 'gemini')
return ai_provider.generate(prompt, max_tokens=max_tokens)
except AIProcessingError as e:
self.logger.error(f"AI generation failed: {e}")
return f"Error: {str(e)}"
def summarize(self, text: str, **kwargs) -> str:
"""Generate summary of text"""
try:
provider = self.factory.get_best_provider()
return provider.summarize(text, **kwargs)
except AIProcessingError as e:
self.logger.error(f"Summarization failed: {e}")
return f"Error: {str(e)}"
def correct_text(self, text: str, **kwargs) -> str:
"""Correct grammar and spelling in text"""
try:
provider = self.factory.get_best_provider()
return provider.correct_text(text, **kwargs)
except AIProcessingError as e:
self.logger.error(f"Text correction failed: {e}")
return text # Return original on error
def classify_content(self, text: str, **kwargs) -> Dict[str, Any]:
"""Classify content into categories"""
try:
provider = self.factory.get_best_provider()
return provider.classify_content(text, **kwargs)
except AIProcessingError as e:
self.logger.error(f"Classification failed: {e}")
return {"category": "otras_clases", "confidence": 0.0}
# Global instance
ai_service = AIService()
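
A brief usage sketch of the module-level instance (the import path is an assumption, since this file's location is not shown; the sample text is illustrative):

```python
# Usage sketch for the global AIService instance; the import path is an
# assumption and the sample text is illustrative.
from ai_service import ai_service

text = "Transcription of a lecture on government institutions..."
print(ai_service.summarize(text))
print(ai_service.classify_content(text)["category"])
```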

services/ai/gemini_provider.py Normal file

@@ -0,0 +1,171 @@
"""
Gemini AI Provider implementation
"""
import logging
import subprocess
import shutil
import requests
from typing import Dict, Any
from ..config import settings
from ..core import AIProcessingError
from .base_provider import AIProvider
class GeminiProvider(AIProvider):
"""Gemini AI provider using CLI or API"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._cli_path = settings.GEMINI_CLI_PATH or shutil.which("gemini")
self._api_key = settings.GEMINI_API_KEY
self._flash_model = settings.GEMINI_FLASH_MODEL
self._pro_model = settings.GEMINI_PRO_MODEL
self._session = None
@property
def name(self) -> str:
return "Gemini"
def is_available(self) -> bool:
"""Check if Gemini is available"""
return bool(self._cli_path or self._api_key)
def _run_cli(self, prompt: str, use_flash: bool = True, timeout: int = 300) -> str:
"""Run Gemini CLI with prompt"""
if not self._cli_path:
raise AIProcessingError("Gemini CLI not available")
model = self._flash_model if use_flash else self._pro_model
cmd = [self._cli_path, model, prompt]
try:
process = subprocess.run(
cmd,
text=True,
capture_output=True,
timeout=timeout,
shell=False
)
if process.returncode != 0:
error_msg = process.stderr or "Unknown error"
raise AIProcessingError(f"Gemini CLI failed: {error_msg}")
return process.stdout.strip()
except subprocess.TimeoutExpired:
raise AIProcessingError(f"Gemini CLI timed out after {timeout}s")
except Exception as e:
raise AIProcessingError(f"Gemini CLI error: {e}")
def _call_api(self, prompt: str, use_flash: bool = True, timeout: int = 180) -> str:
"""Call Gemini API"""
if not self._api_key:
raise AIProcessingError("Gemini API key not configured")
model = self._flash_model if use_flash else self._pro_model
# Initialize session if needed
if self._session is None:
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=10,
pool_maxsize=20
)
self._session.mount('https://', adapter)
url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
payload = {
"contents": [{
"parts": [{"text": prompt}]
}]
}
params = {"key": self._api_key}
try:
response = self._session.post(
url,
json=payload,
params=params,
timeout=timeout
)
response.raise_for_status()
data = response.json()
if "candidates" not in data or not data["candidates"]:
raise AIProcessingError("Empty response from Gemini API")
candidate = data["candidates"][0]
if "content" not in candidate or "parts" not in candidate["content"]:
raise AIProcessingError("Invalid response format from Gemini API")
result = candidate["content"]["parts"][0]["text"]
return result.strip()
except requests.RequestException as e:
raise AIProcessingError(f"Gemini API request failed: {e}")
except (KeyError, IndexError, ValueError) as e:
raise AIProcessingError(f"Gemini API response error: {e}")
def _run(self, prompt: str, use_flash: bool = True, timeout: int = 300) -> str:
"""Run Gemini with fallback between CLI and API"""
# Try CLI first if available
if self._cli_path:
try:
return self._run_cli(prompt, use_flash, timeout)
except Exception as e:
self.logger.warning(f"Gemini CLI failed, trying API: {e}")
# Fallback to API
if self._api_key:
api_timeout = timeout if timeout < 180 else 180
return self._call_api(prompt, use_flash, api_timeout)
raise AIProcessingError("No Gemini provider available (CLI or API)")
def summarize(self, text: str, **kwargs) -> str:
"""Generate summary using Gemini"""
prompt = f"""Summarize the following text:
{text}
Provide a clear, concise summary in Spanish."""
return self._run(prompt, use_flash=True)
def correct_text(self, text: str, **kwargs) -> str:
"""Correct text using Gemini"""
prompt = f"""Correct the following text for grammar, spelling, and clarity:
{text}
Return only the corrected text, nothing else."""
return self._run(prompt, use_flash=True)
def classify_content(self, text: str, **kwargs) -> Dict[str, Any]:
"""Classify content using Gemini"""
categories = ["historia", "analisis_contable", "instituciones_gobierno", "otras_clases"]
prompt = f"""Classify the following text into one of these categories:
- historia
- analisis_contable
- instituciones_gobierno
- otras_clases
Text: {text}
Return only the category name, nothing else."""
result = self._run(prompt, use_flash=True).lower()
# Validate result
if result not in categories:
result = "otras_clases"
return {
"category": result,
"confidence": 0.9,
"provider": self.name
}
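
A direct-use sketch of the provider (assumes `GEMINI_API_KEY` or `GEMINI_CLI_PATH` is configured; the sample text is illustrative):

```python
# Direct-use sketch; assumes GEMINI_API_KEY or GEMINI_CLI_PATH is configured.
from services.ai.gemini_provider import GeminiProvider

provider = GeminiProvider()
if provider.is_available():
    result = provider.classify_content("Notes on the national budget")
    print(result["category"], result["confidence"])
```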

storage/processed_registry.py Normal file

@@ -0,0 +1,137 @@
"""
Processed files registry using repository pattern
"""
import fcntl
import logging
from pathlib import Path
from typing import Set, Optional
from datetime import datetime
from ..config import settings
class ProcessedRegistry:
"""Registry for tracking processed files with caching and file locking"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._cache: Set[str] = set()
self._cache_time: Optional[datetime] = None
self._cache_ttl = 60
self._initialized = False
def initialize(self) -> None:
"""Initialize the registry"""
self.load()
self._initialized = True
def load(self) -> Set[str]:
"""Load processed files from disk with caching"""
now = datetime.utcnow()
if self._cache and self._cache_time and (now - self._cache_time).total_seconds() < self._cache_ttl:
return self._cache.copy()
processed = set()
registry_path = settings.processed_files_path
try:
registry_path.parent.mkdir(parents=True, exist_ok=True)
if registry_path.exists():
with open(registry_path, 'r', encoding='utf-8') as f:
for raw_line in f:
line = raw_line.strip()
if line and not line.startswith('#'):
processed.add(line)
base_name = Path(line).name
processed.add(base_name)
except Exception as e:
self.logger.error(f"Error reading processed files registry: {e}")
self._cache = processed
self._cache_time = now
return processed.copy()
def save(self, file_path: str) -> None:
"""Add file to processed registry with file locking"""
if not file_path:
return
registry_path = settings.processed_files_path
try:
registry_path.parent.mkdir(parents=True, exist_ok=True)
with open(registry_path, 'a', encoding='utf-8') as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
try:
if file_path not in self._cache:
f.write(file_path + "\n")
self._cache.add(file_path)
self.logger.debug(f"Added {file_path} to processed registry")
finally:
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
except Exception as e:
self.logger.error(f"Error saving to processed files registry: {e}")
raise
def is_processed(self, file_path: str) -> bool:
"""Check if file has been processed"""
if not self._initialized:
self.initialize()
if file_path in self._cache:
return True
basename = Path(file_path).name
if basename in self._cache:
return True
return False
def remove(self, file_path: str) -> bool:
"""Remove file from processed registry"""
registry_path = settings.processed_files_path
try:
if not registry_path.exists():
return False
lines_to_keep = []
with open(registry_path, 'r', encoding='utf-8') as f:
for line in f:
if line.strip() != file_path and Path(line.strip()).name != Path(file_path).name:
lines_to_keep.append(line)
with open(registry_path, 'w', encoding='utf-8') as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
try:
f.writelines(lines_to_keep)
self._cache.discard(file_path)
self._cache.discard(Path(file_path).name)
finally:
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
return True
except Exception as e:
self.logger.error(f"Error removing from processed files registry: {e}")
return False
def clear(self) -> None:
"""Clear the entire registry"""
registry_path = settings.processed_files_path
try:
if registry_path.exists():
registry_path.unlink()
self._cache.clear()
self._cache_time = None
self.logger.info("Processed files registry cleared")
except Exception as e:
self.logger.error(f"Error clearing processed files registry: {e}")
raise
def get_all(self) -> Set[str]:
"""Get all processed files"""
if not self._initialized:
self.initialize()
return self._cache.copy()
def count(self) -> int:
"""Get count of processed files"""
if not self._initialized:
self.initialize()
return len(self._cache)
# Global instance
processed_registry = ProcessedRegistry()
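
A short round-trip sketch of the registry (the path is illustrative; basename matching applies to entries loaded from disk):

```python
# Round-trip sketch for the processed-files registry (illustrative path).
from storage.processed_registry import processed_registry

processed_registry.save("Audios/clase_01.mp3")
print(processed_registry.is_processed("Audios/clase_01.mp3"))  # True
processed_registry.remove("Audios/clase_01.mp3")
print(processed_registry.count())
```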

config/__init__.py Normal file

@@ -0,0 +1,8 @@
"""
Configuration package for CBCFacil
"""
from .settings import settings
from .validators import validate_environment
__all__ = ['settings', 'validate_environment']

config/settings.py Normal file

@@ -0,0 +1,261 @@
"""
Centralized configuration management for CBCFacil
"""
import os
from pathlib import Path
from typing import Optional, Set
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class ConfigurationError(Exception):
"""Raised when configuration is invalid"""
pass
class Settings:
"""Application settings loaded from environment variables"""
# Application
APP_NAME: str = "CBCFacil"
APP_VERSION: str = "8.0"
DEBUG: bool = os.getenv("DEBUG", "false").lower() == "true"
# Nextcloud/WebDAV Configuration
NEXTCLOUD_URL: str = os.getenv("NEXTCLOUD_URL", "")
NEXTCLOUD_USER: str = os.getenv("NEXTCLOUD_USER", "")
NEXTCLOUD_PASSWORD: str = os.getenv("NEXTCLOUD_PASSWORD", "")
WEBDAV_ENDPOINT: str = NEXTCLOUD_URL
# Remote folders
REMOTE_AUDIOS_FOLDER: str = "Audios"
REMOTE_DOCX_AUDIO_FOLDER: str = "Documentos"
REMOTE_PDF_FOLDER: str = "Pdf"
REMOTE_TXT_FOLDER: str = "Textos"
RESUMENES_FOLDER: str = "Resumenes"
DOCX_FOLDER: str = "Documentos"
# Local paths
BASE_DIR: Path = Path(__file__).resolve().parent.parent
LOCAL_STATE_DIR: str = os.getenv("LOCAL_STATE_DIR", str(BASE_DIR))
LOCAL_DOWNLOADS_PATH: Path = BASE_DIR / "downloads"
LOCAL_RESUMENES: Path = LOCAL_DOWNLOADS_PATH
LOCAL_DOCX: Path = BASE_DIR / "resumenes_docx"
# Processing
POLL_INTERVAL: int = int(os.getenv("POLL_INTERVAL", "5"))
HTTP_TIMEOUT: int = int(os.getenv("HTTP_TIMEOUT", "30"))
WEBDAV_MAX_RETRIES: int = int(os.getenv("WEBDAV_MAX_RETRIES", "3"))
DOWNLOAD_CHUNK_SIZE: int = int(
os.getenv("DOWNLOAD_CHUNK_SIZE", "65536")
) # 64KB for better performance
MAX_FILENAME_LENGTH: int = int(os.getenv("MAX_FILENAME_LENGTH", "80"))
MAX_FILENAME_BASE_LENGTH: int = int(os.getenv("MAX_FILENAME_BASE_LENGTH", "40"))
MAX_FILENAME_TOPICS_LENGTH: int = int(os.getenv("MAX_FILENAME_TOPICS_LENGTH", "20"))
# File extensions
AUDIO_EXTENSIONS: Set[str] = {".mp3", ".wav", ".m4a", ".ogg", ".aac"}
PDF_EXTENSIONS: Set[str] = {".pdf"}
TXT_EXTENSIONS: Set[str] = {".txt"}
# AI Providers
ZAI_BASE_URL: str = os.getenv("ZAI_BASE_URL", "https://api.z.ai/api/anthropic")
ZAI_DEFAULT_MODEL: str = os.getenv("ZAI_MODEL", "glm-4.6")
ZAI_AUTH_TOKEN: Optional[str] = os.getenv("ANTHROPIC_AUTH_TOKEN") or os.getenv(
"ZAI_AUTH_TOKEN", ""
)
# Notion Integration
NOTION_API_TOKEN: Optional[str] = os.getenv("NOTION_API")
NOTION_DATABASE_ID: Optional[str] = os.getenv("NOTION_DATABASE_ID")
# Gemini
GEMINI_API_KEY: Optional[str] = os.getenv("GEMINI_API_KEY")
GEMINI_FLASH_MODEL: str = os.getenv("GEMINI_FLASH_MODEL", "gemini-2.5-flash")
GEMINI_PRO_MODEL: str = os.getenv("GEMINI_PRO_MODEL", "gemini-1.5-pro")
# CLI paths
GEMINI_CLI_PATH: Optional[str] = os.getenv("GEMINI_CLI_PATH")
CLAUDE_CLI_PATH: Optional[str] = os.getenv("CLAUDE_CLI_PATH")
# Telegram
TELEGRAM_TOKEN: Optional[str] = os.getenv("TELEGRAM_TOKEN")
TELEGRAM_CHAT_ID: Optional[str] = os.getenv("TELEGRAM_CHAT_ID")
# PDF Processing Configuration
CPU_COUNT: int = os.cpu_count() or 1
PDF_MAX_PAGES_PER_CHUNK: int = int(os.getenv("PDF_MAX_PAGES_PER_CHUNK", "2"))
PDF_DPI: int = int(os.getenv("PDF_DPI", "200"))
PDF_RENDER_THREAD_COUNT: int = int(
os.getenv("PDF_RENDER_THREAD_COUNT", str(min(4, CPU_COUNT)))
)
PDF_BATCH_SIZE: int = int(os.getenv("PDF_BATCH_SIZE", "2"))
PDF_TROCR_MAX_BATCH: int = int(
os.getenv("PDF_TROCR_MAX_BATCH", str(PDF_BATCH_SIZE))
)
PDF_TESSERACT_THREADS: int = int(
os.getenv("PDF_TESSERACT_THREADS", str(max(1, min(2, max(1, CPU_COUNT // 3)))))
)
PDF_PREPROCESS_THREADS: int = int(
os.getenv("PDF_PREPROCESS_THREADS", str(PDF_TESSERACT_THREADS))
)
PDF_TEXT_DETECTION_MIN_RATIO: float = float(
os.getenv("PDF_TEXT_DETECTION_MIN_RATIO", "0.6")
)
PDF_TEXT_DETECTION_MIN_AVG_CHARS: int = int(
os.getenv("PDF_TEXT_DETECTION_MIN_AVG_CHARS", "120")
)
# Error handling
ERROR_THROTTLE_SECONDS: int = int(os.getenv("ERROR_THROTTLE_SECONDS", "600"))
# GPU/VRAM Management
MODEL_TIMEOUT_SECONDS: int = int(os.getenv("MODEL_TIMEOUT_SECONDS", "300"))
CUDA_VISIBLE_DEVICES: str = os.getenv("CUDA_VISIBLE_DEVICES", "all")
PYTORCH_CUDA_ALLOC_CONF: str = os.getenv(
"PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512"
)
# GPU Detection (auto, nvidia, amd, cpu)
GPU_PREFERENCE: str = os.getenv("GPU_PREFERENCE", "auto")
# AMD ROCm HSA override for RX 6000 series (gfx1030)
HSA_OVERRIDE_GFX_VERSION: str = os.getenv("HSA_OVERRIDE_GFX_VERSION", "10.3.0")
# Dashboard
DASHBOARD_SECRET_KEY: str = os.getenv("DASHBOARD_SECRET_KEY", "")
DASHBOARD_PORT: int = int(os.getenv("DASHBOARD_PORT", "5000"))
DASHBOARD_HOST: str = os.getenv("DASHBOARD_HOST", "0.0.0.0")
# Logging
LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
LOG_FILE: Optional[str] = os.getenv("LOG_FILE")
# Threading optimization
OMP_NUM_THREADS: int = int(os.getenv("OMP_NUM_THREADS", "4"))
MKL_NUM_THREADS: int = int(os.getenv("MKL_NUM_THREADS", "4"))
# Parallel Processing Configuration
MAX_PARALLEL_UPLOADS: int = int(os.getenv("MAX_PARALLEL_UPLOADS", "4"))
MAX_PARALLEL_AI_REQUESTS: int = int(os.getenv("MAX_PARALLEL_AI_REQUESTS", "3"))
MAX_PARALLEL_PROCESSING: int = int(os.getenv("MAX_PARALLEL_PROCESSING", "2"))
PARALLEL_AI_STRATEGY: str = os.getenv("PARALLEL_AI_STRATEGY", "race") # race, consensus, majority
BACKGROUND_NOTION_UPLOADS: bool = os.getenv("BACKGROUND_NOTION_UPLOADS", "true").lower() == "true"
# ========================================================================
# PROPERTIES WITH VALIDATION
# ========================================================================
@property
def is_production(self) -> bool:
"""Check if running in production mode"""
return not self.DEBUG
@property
def has_webdav_config(self) -> bool:
"""Check if WebDAV credentials are configured"""
return all([self.NEXTCLOUD_URL, self.NEXTCLOUD_USER, self.NEXTCLOUD_PASSWORD])
@property
def has_ai_config(self) -> bool:
"""Check if AI providers are configured"""
return any(
[
self.ZAI_AUTH_TOKEN,
self.GEMINI_API_KEY,
self.CLAUDE_CLI_PATH,
self.GEMINI_CLI_PATH,
]
)
@property
def has_notion_config(self) -> bool:
"""Check if Notion is configured"""
return bool(self.NOTION_API_TOKEN and self.NOTION_DATABASE_ID)
@property
def processed_files_path(self) -> Path:
"""Get the path to the processed files registry"""
return Path(
os.getenv(
"PROCESSED_FILES_PATH",
str(Path(self.LOCAL_STATE_DIR) / "processed_files.txt"),
)
)
@property
def nextcloud_url(self) -> str:
"""Get Nextcloud URL with validation"""
if not self.NEXTCLOUD_URL and self.is_production:
raise ConfigurationError("NEXTCLOUD_URL is required in production mode")
return self.NEXTCLOUD_URL
@property
def nextcloud_user(self) -> str:
"""Get Nextcloud username with validation"""
if not self.NEXTCLOUD_USER and self.is_production:
raise ConfigurationError("NEXTCLOUD_USER is required in production mode")
return self.NEXTCLOUD_USER
@property
def nextcloud_password(self) -> str:
"""Get Nextcloud password with validation"""
if not self.NEXTCLOUD_PASSWORD and self.is_production:
raise ConfigurationError(
"NEXTCLOUD_PASSWORD is required in production mode"
)
return self.NEXTCLOUD_PASSWORD
@property
def valid_webdav_config(self) -> bool:
"""Validate WebDAV configuration completeness"""
try:
_ = self.nextcloud_url
_ = self.nextcloud_user
_ = self.nextcloud_password
return True
except ConfigurationError:
return False
@property
def telegram_configured(self) -> bool:
"""Check if Telegram is properly configured"""
return bool(self.TELEGRAM_TOKEN and self.TELEGRAM_CHAT_ID)
@property
def has_gpu_support(self) -> bool:
"""Check if GPU support is available"""
try:
import torch
return torch.cuda.is_available()
except ImportError:
return False
@property
def environment_type(self) -> str:
"""Get environment type as string"""
return "production" if self.is_production else "development"
@property
def config_summary(self) -> dict:
"""Get configuration summary for logging"""
return {
"app_name": self.APP_NAME,
"version": self.APP_VERSION,
"environment": self.environment_type,
"debug": self.DEBUG,
"webdav_configured": self.has_webdav_config,
"ai_configured": self.has_ai_config,
"telegram_configured": self.telegram_configured,
"gpu_support": self.has_gpu_support,
"cpu_count": self.CPU_COUNT,
"poll_interval": self.POLL_INTERVAL,
}
# Create global settings instance
settings = Settings()
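# Usage sketch (illustrative): modules import the shared instance and rely on
# the validated properties; in production, reading settings.nextcloud_url with
# no NEXTCLOUD_URL set raises ConfigurationError. connect() is hypothetical.
#
#   from config.settings import settings
#   if settings.valid_webdav_config:
#       client = connect(settings.nextcloud_url)
#   logging.info("Startup config: %s", settings.config_summary)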

130
config/settings.py.backup Normal file
View File

@@ -0,0 +1,130 @@
"""
Centralized configuration management for CBCFacil
"""
import os
from pathlib import Path
from typing import Optional, Set
class Settings:
"""Application settings loaded from environment variables"""
# Application
APP_NAME: str = "CBCFacil"
APP_VERSION: str = "8.0"
DEBUG: bool = os.getenv("DEBUG", "false").lower() == "true"
# Nextcloud/WebDAV Configuration
NEXTCLOUD_URL: str = os.getenv("NEXTCLOUD_URL", "")
NEXTCLOUD_USER: str = os.getenv("NEXTCLOUD_USER", "")
NEXTCLOUD_PASSWORD: str = os.getenv("NEXTCLOUD_PASSWORD", "")
WEBDAV_ENDPOINT: str = NEXTCLOUD_URL
# Remote folders
REMOTE_AUDIOS_FOLDER: str = "Audios"
REMOTE_DOCX_AUDIO_FOLDER: str = "Documentos"
REMOTE_PDF_FOLDER: str = "Pdf"
REMOTE_TXT_FOLDER: str = "Textos"
RESUMENES_FOLDER: str = "Resumenes"
DOCX_FOLDER: str = "Documentos"
# Local paths
BASE_DIR: Path = Path(__file__).resolve().parent.parent
LOCAL_STATE_DIR: str = os.getenv("LOCAL_STATE_DIR", str(BASE_DIR))
LOCAL_DOWNLOADS_PATH: Path = BASE_DIR / "downloads"
LOCAL_RESUMENES: Path = LOCAL_DOWNLOADS_PATH
LOCAL_DOCX: Path = BASE_DIR / "resumenes_docx"
# Processing
POLL_INTERVAL: int = int(os.getenv("POLL_INTERVAL", "5"))
HTTP_TIMEOUT: int = int(os.getenv("HTTP_TIMEOUT", "30"))
WEBDAV_MAX_RETRIES: int = int(os.getenv("WEBDAV_MAX_RETRIES", "3"))
DOWNLOAD_CHUNK_SIZE: int = int(os.getenv("DOWNLOAD_CHUNK_SIZE", "65536")) # 64KB for better performance
MAX_FILENAME_LENGTH: int = int(os.getenv("MAX_FILENAME_LENGTH", "80"))
MAX_FILENAME_BASE_LENGTH: int = int(os.getenv("MAX_FILENAME_BASE_LENGTH", "40"))
MAX_FILENAME_TOPICS_LENGTH: int = int(os.getenv("MAX_FILENAME_TOPICS_LENGTH", "20"))
# File extensions
AUDIO_EXTENSIONS: Set[str] = {".mp3", ".wav", ".m4a", ".ogg", ".aac"}
PDF_EXTENSIONS: Set[str] = {".pdf"}
TXT_EXTENSIONS: Set[str] = {".txt"}
# AI Providers
ZAI_BASE_URL: str = os.getenv("ZAI_BASE_URL", "https://api.z.ai/api/anthropic")
ZAI_DEFAULT_MODEL: str = os.getenv("ZAI_MODEL", "glm-4.6")
ZAI_AUTH_TOKEN: Optional[str] = os.getenv("ANTHROPIC_AUTH_TOKEN") or os.getenv("ZAI_AUTH_TOKEN", "")
# Gemini
GEMINI_API_KEY: Optional[str] = os.getenv("GEMINI_API_KEY")
GEMINI_FLASH_MODEL: Optional[str] = os.getenv("GEMINI_FLASH_MODEL")
GEMINI_PRO_MODEL: Optional[str] = os.getenv("GEMINI_PRO_MODEL")
# CLI paths
GEMINI_CLI_PATH: Optional[str] = os.getenv("GEMINI_CLI_PATH")
CLAUDE_CLI_PATH: Optional[str] = os.getenv("CLAUDE_CLI_PATH")
# Telegram
TELEGRAM_TOKEN: Optional[str] = os.getenv("TELEGRAM_TOKEN")
TELEGRAM_CHAT_ID: Optional[str] = os.getenv("TELEGRAM_CHAT_ID")
# PDF Processing Configuration
CPU_COUNT: int = os.cpu_count() or 1
PDF_MAX_PAGES_PER_CHUNK: int = int(os.getenv("PDF_MAX_PAGES_PER_CHUNK", "2"))
PDF_DPI: int = int(os.getenv("PDF_DPI", "200"))
PDF_RENDER_THREAD_COUNT: int = int(os.getenv("PDF_RENDER_THREAD_COUNT", str(min(4, CPU_COUNT))))
PDF_BATCH_SIZE: int = int(os.getenv("PDF_BATCH_SIZE", "2"))
PDF_TROCR_MAX_BATCH: int = int(os.getenv("PDF_TROCR_MAX_BATCH", str(PDF_BATCH_SIZE)))
PDF_TESSERACT_THREADS: int = int(os.getenv("PDF_TESSERACT_THREADS", str(max(1, min(2, max(1, CPU_COUNT // 3))))))
PDF_PREPROCESS_THREADS: int = int(os.getenv("PDF_PREPROCESS_THREADS", str(PDF_TESSERACT_THREADS)))
PDF_TEXT_DETECTION_MIN_RATIO: float = float(os.getenv("PDF_TEXT_DETECTION_MIN_RATIO", "0.6"))
PDF_TEXT_DETECTION_MIN_AVG_CHARS: int = int(os.getenv("PDF_TEXT_DETECTION_MIN_AVG_CHARS", "120"))
# Error handling
ERROR_THROTTLE_SECONDS: int = int(os.getenv("ERROR_THROTTLE_SECONDS", "600"))
# GPU/VRAM Management
MODEL_TIMEOUT_SECONDS: int = int(os.getenv("MODEL_TIMEOUT_SECONDS", "300"))
CUDA_VISIBLE_DEVICES: str = os.getenv("CUDA_VISIBLE_DEVICES", "all")
PYTORCH_CUDA_ALLOC_CONF: str = os.getenv("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:512")
# Dashboard
DASHBOARD_SECRET_KEY: str = os.getenv("DASHBOARD_SECRET_KEY", "")
DASHBOARD_PORT: int = int(os.getenv("DASHBOARD_PORT", "5000"))
DASHBOARD_HOST: str = os.getenv("DASHBOARD_HOST", "0.0.0.0")
# Logging
LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
LOG_FILE: Optional[str] = os.getenv("LOG_FILE")
# Threading optimization
OMP_NUM_THREADS: int = int(os.getenv("OMP_NUM_THREADS", "4"))
MKL_NUM_THREADS: int = int(os.getenv("MKL_NUM_THREADS", "4"))
@property
def is_production(self) -> bool:
"""Check if running in production mode"""
return not self.DEBUG
@property
def has_webdav_config(self) -> bool:
"""Check if WebDAV credentials are configured"""
return all([self.NEXTCLOUD_URL, self.NEXTCLOUD_USER, self.NEXTCLOUD_PASSWORD])
@property
def has_ai_config(self) -> bool:
"""Check if AI providers are configured"""
return any([
self.ZAI_AUTH_TOKEN,
self.GEMINI_API_KEY,
self.CLAUDE_CLI_PATH,
self.GEMINI_CLI_PATH
])
@property
def processed_files_path(self) -> Path:
"""Get the path to the processed files registry"""
return Path(os.getenv("PROCESSED_FILES_PATH", str(Path(self.LOCAL_STATE_DIR) / "processed_files.txt")))
# Create global settings instance
settings = Settings()

64
config/validators.py Normal file
View File

@@ -0,0 +1,64 @@
"""
Configuration validators for CBCFacil
"""
import logging
from typing import List
class ConfigurationError(Exception):
"""Raised when configuration is invalid"""
pass
def validate_environment() -> List[str]:
"""
Validate required environment variables and configuration.
Returns a list of warnings/errors.
"""
from .settings import settings
warnings = []
errors = []
# Check critical configurations
if not settings.has_webdav_config:
warnings.append("WebDAV credentials not configured - file sync will not work")
if not settings.has_ai_config:
warnings.append("No AI providers configured - summary generation will not work")
# Validate API keys format if provided
if settings.ZAI_AUTH_TOKEN:
if len(settings.ZAI_AUTH_TOKEN) < 10:
errors.append("ZAI_AUTH_TOKEN appears to be invalid (too short)")
if settings.GEMINI_API_KEY:
if len(settings.GEMINI_API_KEY) < 20:
errors.append("GEMINI_API_KEY appears to be invalid (too short)")
# Validate dashboard secret
if not settings.DASHBOARD_SECRET_KEY:
warnings.append("DASHBOARD_SECRET_KEY not set - using default is not recommended for production")
if settings.DASHBOARD_SECRET_KEY == "dashboard-secret-key-change-in-production":
warnings.append("Using default dashboard secret key - please change in production")
# Check CUDA availability
try:
import torch
if not torch.cuda.is_available():
warnings.append("CUDA not available - GPU acceleration will be disabled")
except ImportError:
warnings.append("PyTorch not installed - GPU acceleration will be disabled")
# Print warnings
for warning in warnings:
logging.warning(f"Configuration warning: {warning}")
# Raise error if critical issues
if errors:
error_msg = "Configuration errors:\n" + "\n".join(f"- {e}" for e in errors)
logging.error(error_msg)
raise ConfigurationError(error_msg)
return warnings
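# Usage sketch (illustrative): call validate_environment() once at startup.
# Soft issues come back as logged warnings; hard errors abort the process.
#
#   try:
#       validate_environment()
#   except ConfigurationError as exc:
#       raise SystemExit(f"Invalid configuration: {exc}")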

23
core/__init__.py Normal file
View File

@@ -0,0 +1,23 @@
"""
Core package for CBCFacil
"""
from .exceptions import (
ProcessingError,
WebDAVError,
AIProcessingError,
ConfigurationError,
FileProcessingError
)
from .result import Result
from .base_service import BaseService
__all__ = [
'ProcessingError',
'WebDAVError',
'AIProcessingError',
'ConfigurationError',
'FileProcessingError',
'Result',
'BaseService'
]

35
core/base_service.py Normal file
View File

@@ -0,0 +1,35 @@
"""
Base service class for CBCFacil services
"""
import logging
from abc import ABC, abstractmethod
class BaseService(ABC):
"""Base class for all services"""
def __init__(self, name: str):
self.name = name
self.logger = logging.getLogger(f"{__name__}.{name}")
@abstractmethod
def initialize(self) -> None:
"""Initialize the service"""
pass
@abstractmethod
def cleanup(self) -> None:
"""Cleanup service resources"""
pass
def health_check(self) -> bool:
"""Perform health check"""
return True
def __enter__(self):
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
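# Usage sketch (illustrative; EchoService is hypothetical): subclasses only
# fill in initialize()/cleanup(), and the context manager guarantees cleanup.
#
#   class EchoService(BaseService):
#       def initialize(self) -> None:
#           self.logger.info("ready")
#       def cleanup(self) -> None:
#           self.logger.info("closed")
#
#   with EchoService("echo") as svc:
#       assert svc.health_check()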

38
core/exceptions.py Normal file
View File

@@ -0,0 +1,38 @@
"""
Custom exceptions for CBCFacil
"""
class ProcessingError(Exception):
"""Base exception for all processing errors"""
pass
class ConfigurationError(ProcessingError):
"""Raised when configuration is invalid"""
pass
class WebDAVError(ProcessingError):
"""Raised when WebDAV operations fail"""
pass
class AIProcessingError(ProcessingError):
"""Raised when AI processing fails"""
pass
class FileProcessingError(ProcessingError):
"""Raised when file processing fails"""
pass
class AuthenticationError(ProcessingError):
"""Raised when authentication fails"""
pass
class ValidationError(ProcessingError):
"""Raised when input validation fails"""
pass
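# Usage sketch (illustrative; process_file() is hypothetical): since every
# exception derives from ProcessingError, one handler can catch the whole
# family while more specific handlers stay possible.
#
#   try:
#       process_file(path)
#   except WebDAVError as e:
#       logging.warning("transient WebDAV failure, will retry: %s", e)
#   except ProcessingError as e:
#       logging.error("processing failed: %s", e)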

355
core/health_check.py Normal file
View File

@@ -0,0 +1,355 @@
"""
Health check endpoint for CBCFacil service monitoring
"""
import json
import logging
import os
from datetime import datetime
from typing import Any, Dict
from pathlib import Path
logger = logging.getLogger(__name__)
class HealthChecker:
"""Comprehensive health check for all service dependencies"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def check_webdav_connection(self) -> Dict[str, Any]:
"""Check WebDAV service connectivity"""
from config import settings
result = {
"service": "webdav",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat()
}
try:
from services.webdav_service import webdav_service
if not settings.has_webdav_config:
result["status"] = "not_configured"
result["message"] = "WebDAV credentials not configured"
return result
# Test connection with a simple list operation
webdav_service.list(".")
result["status"] = "healthy"
result["message"] = "WebDAV connection successful"
result["endpoint"] = settings.NEXTCLOUD_URL
except Exception as e:
result["status"] = "unhealthy"
result["error"] = str(e)
self.logger.error(f"WebDAV health check failed: {e}")
return result
def check_ai_providers(self) -> Dict[str, Any]:
"""Check AI provider configurations"""
from config import settings
result = {
"service": "ai_providers",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat(),
"providers": {}
}
try:
# Check ZAI
if settings.ZAI_AUTH_TOKEN:
result["providers"]["zai"] = {
"configured": True,
"status": "unknown"
}
else:
result["providers"]["zai"] = {
"configured": False,
"status": "not_configured"
}
# Check Gemini
if settings.GEMINI_API_KEY:
result["providers"]["gemini"] = {
"configured": True,
"status": "unknown"
}
else:
result["providers"]["gemini"] = {
"configured": False,
"status": "not_configured"
}
# Check CLI providers
if settings.CLAUDE_CLI_PATH:
claude_path = Path(settings.CLAUDE_CLI_PATH)
result["providers"]["claude_cli"] = {
"configured": True,
"path_exists": claude_path.exists(),
"status": "available" if claude_path.exists() else "path_invalid"
}
if settings.GEMINI_CLI_PATH:
gemini_path = Path(settings.GEMINI_CLI_PATH)
result["providers"]["gemini_cli"] = {
"configured": True,
"path_exists": gemini_path.exists(),
"status": "available" if gemini_path.exists() else "path_invalid"
}
# Overall status
if settings.has_ai_config:
result["status"] = "healthy"
result["message"] = "At least one AI provider configured"
else:
result["status"] = "not_configured"
result["message"] = "No AI providers configured"
except Exception as e:
result["status"] = "error"
result["error"] = str(e)
self.logger.error(f"AI providers health check failed: {e}")
return result
def check_vram_manager(self) -> Dict[str, Any]:
"""Check VRAM manager status"""
result = {
"service": "vram_manager",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat()
}
try:
from services.vram_manager import vram_manager
vram_info = vram_manager.get_vram_info()
result["status"] = "healthy"
result["vram_info"] = {
"total_gb": round(vram_info.get("total", 0) / (1024**3), 2),
"free_gb": round(vram_info.get("free", 0) / (1024**3), 2),
"allocated_gb": round(vram_info.get("allocated", 0) / (1024**3), 2)
}
result["cuda_available"] = vram_info.get("cuda_available", False)
except Exception as e:
result["status"] = "unavailable"
result["error"] = str(e)
self.logger.error(f"VRAM manager health check failed: {e}")
return result
def check_telegram_service(self) -> Dict[str, Any]:
"""Check Telegram service status"""
from config import settings
result = {
"service": "telegram",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat()
}
try:
from services.telegram_service import telegram_service
if telegram_service.is_configured:
result["status"] = "healthy"
result["message"] = "Telegram service configured"
else:
result["status"] = "not_configured"
result["message"] = "Telegram credentials not configured"
except Exception as e:
result["status"] = "error"
result["error"] = str(e)
self.logger.error(f"Telegram service health check failed: {e}")
return result
def check_processed_registry(self) -> Dict[str, Any]:
"""Check processed files registry"""
result = {
"service": "processed_registry",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat()
}
try:
from storage.processed_registry import processed_registry
# Try to load registry
processed_registry.load()
result["status"] = "healthy"
result["registry_path"] = str(processed_registry.registry_path)
# Check if registry file is writable
registry_file = Path(processed_registry.registry_path)
if registry_file.exists():
result["registry_exists"] = True
result["registry_writable"] = registry_file.is_file() and os.access(registry_file, os.W_OK)
else:
result["registry_exists"] = False
except Exception as e:
result["status"] = "unhealthy"
result["error"] = str(e)
self.logger.error(f"Processed registry health check failed: {e}")
return result
def check_disk_space(self) -> Dict[str, Any]:
"""Check available disk space"""
result = {
"service": "disk_space",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat()
}
try:
import shutil
# Check main directory
usage = shutil.disk_usage(Path(__file__).parent.parent)
total_gb = usage.total / (1024**3)
free_gb = usage.free / (1024**3)
used_percent = (usage.used / usage.total) * 100
result["status"] = "healthy"
result["total_gb"] = round(total_gb, 2)
result["free_gb"] = round(free_gb, 2)
result["used_percent"] = round(used_percent, 2)
# Warning if low disk space
if free_gb < 1: # Less than 1GB
result["status"] = "warning"
result["message"] = "Low disk space"
elif free_gb < 5: # Less than 5GB
result["status"] = "degraded"
result["message"] = "Disk space running low"
except Exception as e:
result["status"] = "error"
result["error"] = str(e)
self.logger.error(f"Disk space health check failed: {e}")
return result
def check_configuration(self) -> Dict[str, Any]:
"""Check configuration validity"""
from config import settings
result = {
"service": "configuration",
"status": "unknown",
"timestamp": datetime.utcnow().isoformat()
}
try:
warnings = []
# Check for warnings
if not settings.has_webdav_config:
warnings.append("WebDAV not configured")
if not settings.has_ai_config:
warnings.append("AI providers not configured")
if not settings.telegram_configured:
warnings.append("Telegram not configured")
if settings.DASHBOARD_SECRET_KEY == "":
warnings.append("Dashboard secret key not set")
if settings.DASHBOARD_SECRET_KEY == "dashboard-secret-key-change-in-production":
warnings.append("Using default dashboard secret")
result["status"] = "healthy" if not warnings else "warning"
result["warnings"] = warnings
result["environment"] = settings.environment_type
except Exception as e:
result["status"] = "error"
result["error"] = str(e)
self.logger.error(f"Configuration health check failed: {e}")
return result
def run_full_health_check(self) -> Dict[str, Any]:
"""Run all health checks and return comprehensive status"""
checks = [
("configuration", self.check_configuration),
("webdav", self.check_webdav_connection),
("ai_providers", self.check_ai_providers),
("vram_manager", self.check_vram_manager),
("telegram", self.check_telegram_service),
("processed_registry", self.check_processed_registry),
("disk_space", self.check_disk_space)
]
results = {}
overall_status = "healthy"
for check_name, check_func in checks:
try:
result = check_func()
results[check_name] = result
# Track overall status
if result["status"] in ["unhealthy", "error"]:
overall_status = "unhealthy"
elif result["status"] in ["warning", "degraded"] and overall_status == "healthy":
overall_status = "warning"
except Exception as e:
results[check_name] = {
"service": check_name,
"status": "error",
"error": str(e),
"timestamp": datetime.utcnow().isoformat()
}
overall_status = "unhealthy"
self.logger.error(f"Health check {check_name} failed: {e}")
return {
"overall_status": overall_status,
"timestamp": datetime.utcnow().isoformat(),
"checks": results,
"summary": {
"total_checks": len(checks),
"healthy": sum(1 for r in results.values() if r["status"] == "healthy"),
"warning": sum(1 for r in results.values() if r["status"] == "warning"),
"unhealthy": sum(1 for r in results.values() if r["status"] == "unhealthy")
}
}
# Convenience function for CLI usage
def get_health_status() -> Dict[str, Any]:
"""Get comprehensive health status"""
checker = HealthChecker()
return checker.run_full_health_check()
if __name__ == "__main__":
# CLI usage: python core/health_check.py
import sys
health = get_health_status()
print(json.dumps(health, indent=2))
# Exit with appropriate code
if health["overall_status"] == "healthy":
sys.exit(0)
elif health["overall_status"] == "warning":
sys.exit(1)
else:
sys.exit(2)
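# Integration sketch (illustrative, assuming the Flask-based dashboard): the
# same checker can back an HTTP endpoint for external monitors.
#
#   from flask import Flask, jsonify
#   app = Flask(__name__)
#
#   @app.route("/health")
#   def health():
#       report = get_health_status()
#       code = 200 if report["overall_status"] == "healthy" else 503
#       return jsonify(report), code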

43
core/result.py Normal file
View File

@@ -0,0 +1,43 @@
"""
Result type for handling success/error cases
"""
from typing import TypeVar, Generic, Callable
from dataclasses import dataclass
T = TypeVar('T')
U = TypeVar('U')
E = TypeVar('E')
@dataclass
class Success(Generic[T]):
"""Successful result with value"""
value: T
def is_success(self) -> bool:
return True
def is_error(self) -> bool:
return False
def map(self, func: Callable[[T], U]) -> 'Success[U]':
"""Apply a function to the value, wrapping the result in a new Success"""
return Success(func(self.value))
@dataclass
class Error(Generic[E]):
"""Error result with error value"""
error: E
def is_success(self) -> bool:
return False
def is_error(self) -> bool:
return True
def map(self, func: Callable) -> 'Error[E]':
"""Return self on error"""
return self
Result = Success[T] | Error[E]
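# Usage sketch (illustrative; parse_port() is hypothetical): callers branch on
# the variant, and map() only transforms the success path.
#
#   def parse_port(raw: str) -> Result:
#       return Success(int(raw)) if raw.isdigit() else Error(f"bad port: {raw}")
#
#   res = parse_port("5000")
#   if res.is_success():
#       print(res.map(lambda p: p + 1))  # Success(value=5001)
#   else:
#       print(res.error)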

126
create_notion_database.py Normal file
View File

@@ -0,0 +1,126 @@
#!/usr/bin/env python3
"""
Script para crear una nueva base de datos de Notion y compartirla automáticamente
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from config import settings
from notion_client import Client
def main():
    print("\n" + "=" * 70)
    print("🛠️ CREATE A NOTION DATABASE FOR CBCFACIL")
    print("=" * 70 + "\n")
    token = settings.NOTION_API_TOKEN
    if not token:
        print("❌ Token not configured in .env")
        return
    client = Client(auth=token)
    # First, find a page where the database can be created
    print("🔍 Searching for accessible pages...\n")
    results = client.search(page_size=100)
    pages = [p for p in results.get("results", []) if p.get("object") == "page"]
    if not pages:
        print("❌ You have no accessible pages.")
        print("\n📋 SOLUTION:")
        print("1. Go to Notion and create a new page")
        print("2. On that page, click 'Share'")
        print("3. Find and add your integration")
        print("4. Run this script again\n")
        return
    # Show the available pages
    print(f"✅ Found {len(pages)} accessible page(s):\n")
    for i, page in enumerate(pages[:10], 1):
        page_id = page.get("id")
        props = page.get("properties", {})
        # Try to get the title
        title = "Untitled"
        for prop_name, prop_data in props.items():
            if prop_data.get("type") == "title":
                title_list = prop_data.get("title", [])
                if title_list:
                    title = title_list[0].get("plain_text", "Untitled")
                break
        print(f"{i}. {title[:50]}")
        print(f"   ID: {page_id}\n")
    # Use the first accessible page
    parent_page = pages[0]
    parent_id = parent_page.get("id")
    print("=" * 70)
    print("📄 The database will be created inside the first page")
    print("=" * 70 + "\n")
    try:
        # Create the database
        print("🚀 Creating database 'CBCFacil - Documentos'...\n")
        database = client.databases.create(
            parent={"page_id": parent_id},
            title=[
                {
                    "type": "text",
                    "text": {"content": "CBCFacil - Documentos Procesados"},
                }
            ],
            properties={
                "Name": {"title": {}},
                "Status": {
                    "select": {
                        "options": [
                            {"name": "Procesado", "color": "green"},
                            {"name": "En Proceso", "color": "yellow"},
                            {"name": "Error", "color": "red"},
                        ]
                    }
                },
                "Tipo": {
                    "select": {
                        "options": [
                            {"name": "AUDIO", "color": "purple"},
                            {"name": "PDF", "color": "orange"},
                            {"name": "TEXTO", "color": "gray"},
                        ]
                    }
                },
                "Fecha": {"date": {}},
            },
        )
        db_id = database["id"]
        print("✅ Database created successfully!")
        print("=" * 70)
        print("\n📊 Database information:\n")
        print("   Name: CBCFacil - Documentos Procesados")
        print(f"   ID: {db_id}")
        print(f"   URL: https://notion.so/{db_id.replace('-', '')}")
        print("\n" + "=" * 70)
        print("\n🎯 NEXT STEP:")
        print("=" * 70)
        print("\nUpdate your .env file with:\n")
        print(f"NOTION_DATABASE_ID={db_id}\n")
        print("Then run:")
        print("python test_notion_integration.py\n")
        print("=" * 70 + "\n")
    except Exception as e:
        print(f"❌ Error creating database: {e}")
        print("\nCheck that the integration has write permissions.\n")
if __name__ == "__main__":
main()

116
diagnose_notion.py Normal file
View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python3
"""
Script para diagnosticar la integración de Notion
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from config import settings
from notion_client import Client
def main():
    print("\n" + "=" * 70)
    print("🔍 FULL NOTION DIAGNOSTIC")
    print("=" * 70 + "\n")
    token = settings.NOTION_API_TOKEN
    database_id = settings.NOTION_DATABASE_ID
    print(f"Token: {token[:30]}..." if token else "❌ Token not configured")
    print(f"Database ID: {database_id}\n")
    if not token:
        print("❌ Set NOTION_API in .env\n")
        return
    client = Client(auth=token)
    # Test 1: Verify that the token is valid
    print("📝 Test 1: Verifying token...")
    try:
        # Try searching for pages (any at all)
        results = client.search(query="", page_size=1)
        print("✅ Valid token - the integration is active\n")
        # Check whether it has access to any page
        pages = results.get("results", [])
        if pages:
            print(f"✅ The integration has access to {len(pages)} page(s)")
            for page in pages[:3]:
                page_id = page.get("id", "N/A")
                page_type = page.get("object", "N/A")
                print(f" - {page_type}: {page_id}")
        else:
            print("⚠️ The integration does NOT have access to any page yet")
            print(" This is normal if you just created the integration.\n")
    except Exception as e:
        print(f"❌ Token error: {e}\n")
        return
    # Test 2: Verify access to the specific database
    print("\n📊 Test 2: Verifying access to the CBC database...")
    try:
        database = client.databases.retrieve(database_id=database_id)
        print("✅ SUCCESS! The integration can access the database\n")
        title = database.get("title", [{}])[0].get("plain_text", "Untitled")
        print(f" Title: {title}")
        print(f" ID: {database['id']}")
        print("\n Properties:")
        for prop_name in database.get("properties", {}).keys():
            print(f" - {prop_name}")
        print("\n" + "=" * 70)
        print("✅ EVERYTHING IS CONFIGURED CORRECTLY")
        print("=" * 70)
        print("\n🚀 Run: python test_notion_integration.py\n")
    except Exception as e:
        error_msg = str(e)
        print("❌ Cannot access the database")
        print(f" Error: {error_msg}\n")
        if "Could not find database" in error_msg:
            print("=" * 70)
            print("⚠️ ACTION REQUIRED: Share the database")
            print("=" * 70)
            print("\n📋 DETAILED STEPS:\n")
            print("1. Open Notion in your browser")
            print("\n2. Go to your 'CBC' database")
            print(" Option A: Use this direct link:")
            print(f" → https://www.notion.so/{database_id.replace('-', '')}")
            print("\n Option B: Search for 'CBC' in your workspace")
            print("\n3. On the database page, find the '...' button")
            print(" (three dots) in the TOP RIGHT corner")
            print("\n4. In the menu that opens, look for:")
            print(" - 'Connections'")
            print(" - 'Conexiones' (if Notion is in Spanish)")
            print(" - 'Connect to' (the label may vary)")
            print("\n5. Click it and you will see a menu of integrations")
            print("\n6. Find your integration in the list")
            print(" (It should have the name you gave it when you created it)")
            print("\n7. Click your integration to enable it")
            print("\n8. Confirm the permissions when prompted")
            print("\n9. You should see a message confirming the connection")
            print("\n10. Done! Run again:")
            print(" python verify_notion_permissions.py\n")
            print("=" * 70)
            # Alternative: create a simple test page to verify
            print("\n💡 ALTERNATIVE: Create a new test page\n")
            print("If you cannot find the connections option on your database,")
            print("you can create a new page and share it with the integration:\n")
            print("1. Create a new page in Notion")
            print("2. On that page, click 'Share'")
            print("3. Find your integration and add it")
            print("4. Then convert that page into a database")
            print("5. Use the ID of that new database\n")
if __name__ == "__main__":
main()

7
document/__init__.py Normal file
View File

@@ -0,0 +1,7 @@
"""
Document generation package for CBCFacil
"""
from .generators import DocumentGenerator
__all__ = ['DocumentGenerator']

669
document/generators.py Normal file
View File

@@ -0,0 +1,669 @@
"""
Document generation utilities - LaTeX Academic Summary System
This module generates comprehensive academic summaries in LaTeX format
following the specifications in latex/resumen.md (the SINGLE SOURCE OF TRUTH).
Parallel Processing: Uses multiple agents for accelerated summary generation:
- AI Provider Racing: Multiple AI providers generate in parallel
- Parallel Format Conversion: PDF + DOCX generated simultaneously
- Background Notion Uploads: Non-blocking uploads to Notion
"""
import logging
import subprocess
import shutil
import re
import threading
from pathlib import Path
from typing import Dict, Any, Optional, Tuple, Callable
from concurrent.futures import ThreadPoolExecutor, as_completed
from core import FileProcessingError
from config import settings
from services.ai import ai_provider_factory
from services.ai.prompt_manager import prompt_manager
def _sanitize_latex(latex_code: str) -> str:
"""
Pre-process LaTeX code to fix common errors before compilation.
This function applies automated fixes for known issues that AI models
frequently generate, reducing the need for fix_latex() iterations.
Currently handles:
- TikZ nodes with line breaks (\\\\) missing align=center in their options
"""
if not latex_code:
return latex_code
result = latex_code
# Fix TikZ nodes with \\\\ but missing align=center
# Pattern: \node[...] (name) {Text\\More};
# This is a common AI error - TikZ requires align=center for \\\\ in nodes
# We need to find \node commands and add align=center if they have \\\\ in content
# but don't already have align= in their options
def fix_tikz_node(match):
"""Fix a single TikZ node by adding align=center if needed"""
full_match = match.group(0)
options = match.group(1) # Content inside [...]
rest = match.group(2) # Everything after options
# Check if this node has \\\\ in its content (text between { })
# and doesn't already have align=
if "\\\\" in rest and "align=" not in options:
# Add align=center to the options
if options.strip():
new_options = options.rstrip() + ", align=center"
else:
new_options = "align=center"
return f"\\node[{new_options}]{rest}"
return full_match
# Match \node[options] followed by rest of the line
# Capture options and the rest separately
tikz_node_pattern = r"\\node\[([^\]]*)\]([^;]*;)"
result = re.sub(tikz_node_pattern, fix_tikz_node, result)
return result
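# Example of the TikZ rewrite performed above (hypothetical input/output):
#   before: \node[draw] (a) {Line 1\\Line 2};
#   after:  \node[draw, align=center] (a) {Line 1\\Line 2};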
class DocumentGenerator:
"""
Generates academic summary documents in LaTeX format.
The system follows these principles:
1. latex/resumen.md is the SINGLE SOURCE OF TRUTH for prompt structure
2. Generates full LaTeX documents (not Markdown)
3. Compiles to PDF using pdflatex
4. Supports iterative fixing with AI if compilation fails
5. Supports progress notifications via callback
"""
def __init__(self, notification_callback: Optional[Callable[[str], None]] = None):
"""
Initialize DocumentGenerator.
Args:
notification_callback: Optional callback function for progress notifications
Takes a single string argument (message to send)
"""
self.logger = logging.getLogger(__name__)
self.ai_provider = ai_provider_factory.get_best_provider()
self.notification_callback = notification_callback
self.use_parallel = ai_provider_factory.use_parallel()
self.executor = ThreadPoolExecutor(max_workers=4)
# Ensure output directories exist
settings.LOCAL_DOWNLOADS_PATH.mkdir(parents=True, exist_ok=True)
settings.LOCAL_DOCX.mkdir(parents=True, exist_ok=True)
if self.use_parallel:
self.logger.info(
"🚀 Parallel processing enabled: Multiple AI providers available"
)
def _notify(self, message: str) -> None:
"""Send notification if callback is configured"""
if self.notification_callback:
try:
self.notification_callback(message)
except Exception as e:
self.logger.warning(f"Failed to send notification: {e}")
def _generate_with_parallel_provider(self, prompt: str, **kwargs) -> str:
"""
Generate content using multiple AI providers in parallel.
Races multiple providers and returns the first successful response,
or the best quality response if using consensus strategy.
"""
try:
parallel_provider = ai_provider_factory.get_parallel_provider(max_workers=4)
self.logger.info("🚀 Using parallel AI provider (race mode)")
result = parallel_provider.generate_parallel(
prompt=prompt,
strategy="race", # Use first successful response
timeout_ms=300000, # 5 minutes
**kwargs,
)
self.logger.info(
f"✅ Parallel generation complete: {result.selected_provider} selected, "
f"{result.total_duration_ms}ms"
)
return result.content
except Exception as e:
self.logger.warning(
f"⚠️ Parallel generation failed: {e}, falling back to single provider"
)
return self.ai_provider.generate_text(prompt, **kwargs)
def _convert_formats_parallel(
self, tex_path: Path, pdf_path: Optional[Path], base_name: str
) -> Optional[Path]:
"""
Convert to additional formats using the shared executor.
Currently only DOCX is generated here; the PDF has already been
produced by the pdflatex compilation step.
"""
futures = {}
# Generate DOCX
if shutil.which("pandoc"):
futures["docx"] = self.executor.submit(
self._convert_tex_to_docx, tex_path, base_name
)
# Wait for DOCX completion
docx_path = None
if "docx" in futures:
try:
docx_path = futures["docx"].result(timeout=60)
if docx_path:
self.logger.info(f"✅ Parallel DOCX generated: {docx_path}")
except Exception as e:
self.logger.warning(f"⚠️ DOCX generation failed: {e}")
return docx_path
def _upload_to_notion_background(
self,
base_name: str,
summary: str,
pdf_path: Optional[Path],
metadata: Dict[str, Any],
):
"""Upload to Notion in background thread (non-blocking)."""
def upload_worker():
try:
from services.notion_service import notion_service
title = base_name.replace("_", " ").title()
notion_metadata = {
"file_type": "Audio",
"pdf_path": pdf_path or Path(""),
"add_status": False,
"use_as_page": False,
}
page_id = notion_service.create_page_with_summary(
title=title, summary=summary, metadata=notion_metadata
)
if page_id:
metadata["notion_uploaded"] = True
metadata["notion_page_id"] = page_id
self.logger.info(
f"✅ Background upload to Notion complete: {title}"
)
else:
self.logger.warning(f"⚠️ Background Notion upload failed: {title}")
except Exception as e:
self.logger.warning(f"❌ Background Notion upload error: {e}")
# Start background thread
thread = threading.Thread(target=upload_worker, daemon=True)
thread.start()
self.logger.info("🔄 Notion upload started in background")
def generate_summary(
self,
text: str,
base_name: str,
materia: str = "Economía",
bibliographic_text: Optional[str] = None,
class_number: Optional[int] = None,
) -> Tuple[bool, str, Dict[str, Any]]:
"""
Generate comprehensive academic summary in LaTeX format.
Args:
text: The class transcription text
base_name: Base filename for output files
materia: Subject name (default: "Economía")
bibliographic_text: Optional supporting material from books/notes
class_number: Optional class number for header
Returns:
Tuple of (success, summary_text, metadata)
"""
self.logger.info(
f"🚀 Starting LaTeX academic summary generation for: {base_name}"
)
metadata = {
"filename": base_name,
"tex_path": "",
"pdf_path": "",
"markdown_path": "",
"docx_path": "",
"summary_snippet": "",
"notion_uploaded": False,
"notion_page_id": None,
"materia": materia,
}
try:
# === STEP 1: Generate LaTeX content using AI ===
self.logger.info(
"🧠 Sending request to AI Provider for LaTeX generation..."
)
self._notify("📝 Preparando prompt de resumen académico...")
prompt = prompt_manager.get_latex_summary_prompt(
transcription=text,
materia=materia,
bibliographic_text=bibliographic_text,
class_number=class_number,
)
self._notify(
"🧠 Sending the request to the AI (this may take a few minutes)..."
)
# Use parallel provider if multiple AI providers are available
if self.use_parallel:
raw_response = self._generate_with_parallel_provider(prompt)
else:
raw_response = self.ai_provider.generate_text(prompt)
if not raw_response:
raise FileProcessingError("AI returned empty response")
self.logger.info(f"📝 AI response received: {len(raw_response)} characters")
self._notify(f"✅ Respuesta recibida ({len(raw_response)} caracteres)")
# === STEP 2: Extract clean LaTeX from AI response ===
self._notify("🔍 Extrayendo código LaTeX...")
latex_content = prompt_manager.extract_latex_from_response(raw_response)
if not latex_content:
self.logger.warning(
"⚠️ No valid LaTeX found in response, treating as Markdown"
)
self._notify("⚠️ No se detectó LaTeX válido, usando modo compatible...")
# Fallback to Markdown processing
return self._fallback_to_markdown(raw_response, base_name, metadata)
self.logger.info("✨ Valid LaTeX content detected")
self._notify(f"✨ LaTeX detectado: {len(latex_content)} caracteres")
# === STEP 3: Compilation Loop with Self-Correction ===
max_retries = 3
current_latex = latex_content
for attempt in range(max_retries + 1):
# Sanitize LaTeX before saving (fix common AI errors like TikZ nodes)
current_latex = _sanitize_latex(current_latex)
# Save current .tex file
self._notify(
f"📄 Saving .tex file (attempt {attempt + 1}/{max_retries + 1})..."
)
tex_path = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.tex"
tex_path.write_text(current_latex, encoding="utf-8")
metadata["tex_path"] = str(tex_path)
# Try to compile
self._notify("⚙️ Compiling LaTeX (two passes)...")
pdf_path = self._compile_latex(
tex_path, output_dir=settings.LOCAL_DOWNLOADS_PATH
)
if pdf_path:
self.logger.info(
f"✅ Compilation success on attempt {attempt + 1}!"
)
self._notify("✅ PDF generado exitosamente!")
metadata["pdf_path"] = str(pdf_path)
# Generate DOCX in parallel
self._notify("📄 Generando archivo DOCX en paralelo...")
docx_path = self._convert_formats_parallel(
tex_path, pdf_path, base_name
)
if docx_path:
self._notify("✅ DOCX generado exitosamente!")
metadata["docx_path"] = str(docx_path)
# Create a text summary for Notion/preview
text_summary = self._create_text_summary(current_latex)
metadata["summary_snippet"] = text_summary[:500] + "..."
# Upload to Notion in background if configured
if settings.has_notion_config:
self._notify("📤 Iniciando carga a Notion en segundo plano...")
self._upload_to_notion_background(
base_name=base_name,
summary=text_summary,
pdf_path=pdf_path,
metadata=metadata,
)
self._notify("🎉 ¡Resumen completado con éxito!")
return True, text_summary, metadata
# Compilation failed - ask AI to fix
if attempt < max_retries:
self.logger.warning(
f"⚠️ Compilation failed (Attempt {attempt + 1}/{max_retries + 1}). "
f"Requesting AI fix..."
)
self._notify(
f"⚠️ Compilation error ({attempt + 1}/{max_retries + 1}), requesting an AI fix..."
)
# Get error log
log_file = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.log"
error_log = "Log file not found"
if log_file.exists():
error_log = log_file.read_text(
encoding="utf-8", errors="ignore"
)[-2000:]
# Ask AI to fix
try:
self._notify("🔧 La IA está corrigiendo el código LaTeX...")
if hasattr(self.ai_provider, "fix_latex"):
fixed_latex = self.ai_provider.fix_latex(
current_latex, error_log
)
cleaned = prompt_manager.extract_latex_from_response(
fixed_latex
)
if cleaned:
current_latex = cleaned
else:
current_latex = fixed_latex
self._notify(
"✅ LaTeX code fixed, retrying compilation..."
)
else:
self.logger.error(
"❌ AI provider doesn't support fix_latex()"
)
break
except Exception as e:
self.logger.error(f"❌ AI fix request failed: {e}")
break
else:
self.logger.error(
"❌ Max retries reached. LaTeX compilation failed."
)
self._notify(
"❌ Could not compile the LaTeX after several attempts"
)
# If we get here, all compilation attempts failed
self._notify("⚠️ Falling back to Markdown compatibility mode...")
return self._fallback_to_markdown(
current_latex or raw_response, base_name, metadata
)
except Exception as e:
self.logger.error(
f"❌ Critical error in document generation: {e}", exc_info=True
)
self._notify(f"❌ Error en la generación: {str(e)[:100]}")
return False, "", metadata
def _compile_latex(self, tex_path: Path, output_dir: Path) -> Optional[Path]:
"""
Compile LaTeX to PDF using pdflatex. Runs twice for TOC.
Args:
tex_path: Path to .tex file
output_dir: Directory for output files
Returns:
Path to generated PDF or None if failed
"""
base_name = tex_path.stem
expected_pdf = output_dir / f"{base_name}.pdf"
# Check if pdflatex is available
if not shutil.which("pdflatex"):
self.logger.error("🚫 pdflatex not found in system PATH")
return None
cmd = [
"pdflatex",
"-interaction=nonstopmode",
"-halt-on-error",
f"-output-directory={output_dir}",
str(tex_path),
]
try:
# Pass 1
self.logger.info("⚙️ Compiling LaTeX (Pass 1/2)...")
subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
timeout=120,
)
# Pass 2 (for TOC resolution)
self.logger.info("⚙️ Compiling LaTeX (Pass 2/2)...")
result = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
timeout=120,
)
if result.returncode == 0 and expected_pdf.exists():
self.logger.info(f"✅ PDF generated: {expected_pdf}")
self._cleanup_latex_aux(output_dir, base_name)
return expected_pdf
else:
# Read log file for error info
log_file = output_dir / f"{base_name}.log"
error_snippet = "Unknown error"
if log_file.exists():
try:
log_content = log_file.read_text(
encoding="utf-8", errors="ignore"
)
errors = [
line
for line in log_content.splitlines()
if line.startswith("!")
]
if errors:
error_snippet = errors[0][:200]
except Exception:
pass
self.logger.error(f"❌ LaTeX compilation failed: {error_snippet}")
return None
except subprocess.TimeoutExpired:
self.logger.error("❌ LaTeX compilation timed out")
return None
except Exception as e:
self.logger.error(f"❌ Error during LaTeX execution: {e}")
return None
def _convert_tex_to_docx(self, tex_path: Path, base_name: str) -> Optional[Path]:
"""Convert .tex to .docx using Pandoc."""
if not shutil.which("pandoc"):
self.logger.warning("⚠️ pandoc not found, skipping DOCX generation")
return None
docx_path = settings.LOCAL_DOCX / f"{base_name}.docx"
cmd = ["pandoc", str(tex_path), "-o", str(docx_path)]
try:
subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=60)
self.logger.info(f"✅ DOCX generated: {docx_path}")
return docx_path
except Exception as e:
self.logger.warning(f"⚠️ DOCX generation failed: {e}")
return None
def _create_text_summary(self, latex_content: str) -> str:
"""Extract a plain text summary from LaTeX content for Notion/preview."""
# Remove LaTeX commands and keep content
text = latex_content
# Remove document class and packages
text = re.sub(r"\\documentclass\[?[^\]]*\]?\{[^\}]+\}", "", text)
text = re.sub(r"\\usepackage\{[^\}]+\}", "", text)
text = re.sub(r"\\geometry\{[^\}]+\}", "", text)
text = re.sub(r"\\pagestyle\{[^\}]+\}", "", text)
text = re.sub(r"\\fancyhf\{\}", "", text)
text = re.sub(r"\\fancyhead\[?[^\]]*\]?\{[^\}]+\}", "", text)
text = re.sub(r"\\fancyfoot\[?[^\]]*\]?\{[^\}]+\}", "", text)
# Convert sections to markdown-style
text = re.sub(r"\\section\*?\{([^\}]+)\}", r"# \1", text)
text = re.sub(r"\\subsection\*?\{([^\}]+)\}", r"## \1", text)
text = re.sub(r"\\subsubsection\*?\{([^\}]+)\}", r"### \1", text)
# Remove tcolorbox environments (keep content)
text = re.sub(
r"\\begin\{(definicion|importante|ejemplo)\}\[?[^\]]*\]?",
r"\n**\1:** ",
text,
)
text = re.sub(r"\\end\{(definicion|importante|ejemplo)\}", "", text)
# Convert itemize to bullets
text = re.sub(r"\\item\s*", "- ", text)
text = re.sub(r"\\begin\{(itemize|enumerate)\}", "", text)
text = re.sub(r"\\end\{(itemize|enumerate)\}", "", text)
# Clean up math (basic)
text = re.sub(r"\$\$([^\$]+)\$\$", r"\n\n\1\n\n", text)
text = re.sub(r"\$([^\$]+)\$", r"\1", text)
# Remove remaining LaTeX commands
text = re.sub(r"\\[a-zA-Z]+(\{[^\}]*\})*", "", text)
text = re.sub(r"[{}]", "", text)
# Clean whitespace
text = re.sub(r"\n\s*\n\s*\n", "\n\n", text)
text = text.strip()
return text
def _fallback_to_markdown(
self, content: str, base_name: str, metadata: Dict[str, Any]
) -> Tuple[bool, str, Dict[str, Any]]:
"""Fallback when LaTeX generation fails."""
self.logger.warning("⚠️ Falling back to Markdown processing")
md_path = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}_resumen.md"
md_path.write_text(content, encoding="utf-8")
metadata["markdown_path"] = str(md_path)
# Try to convert to PDF via pandoc
if shutil.which("pandoc"):
pdf_path = self._convert_md_to_pdf(md_path, base_name)
if pdf_path:
metadata["pdf_path"] = str(pdf_path)
docx_path = self._convert_md_to_docx(md_path, base_name)
if docx_path:
metadata["docx_path"] = str(docx_path)
metadata["summary_snippet"] = content[:500] + "..."
return True, content, metadata
def _convert_md_to_pdf(self, md_path: Path, base_name: str) -> Optional[Path]:
"""Convert Markdown to PDF using pandoc."""
pdf_path = settings.LOCAL_DOWNLOADS_PATH / f"{base_name}.pdf"
cmd = [
"pandoc",
str(md_path),
"-o",
str(pdf_path),
"--pdf-engine=pdflatex",
"-V",
"geometry:margin=2.5cm",
]
try:
subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=60)
self.logger.info(f"✅ PDF from Markdown: {pdf_path}")
return pdf_path
except Exception as e:
self.logger.warning(f"⚠️ PDF from Markdown failed: {e}")
return None
def _convert_md_to_docx(self, md_path: Path, base_name: str) -> Optional[Path]:
"""Convert Markdown to DOCX using pandoc."""
docx_path = settings.LOCAL_DOCX / f"{base_name}.docx"
cmd = ["pandoc", str(md_path), "-o", str(docx_path)]
try:
subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=60)
self.logger.info(f"✅ DOCX from Markdown: {docx_path}")
return docx_path
except Exception as e:
self.logger.warning(f"⚠️ DOCX from Markdown failed: {e}")
return None
def _cleanup_latex_aux(self, output_dir: Path, base_name: str):
"""Clean up auxiliary LaTeX files."""
extensions = [".aux", ".log", ".out", ".toc"]
for ext in extensions:
aux_file = output_dir / f"{base_name}{ext}"
if aux_file.exists():
try:
aux_file.unlink()
except Exception:
pass
def _upload_to_notion(
self,
base_name: str,
summary: str,
pdf_path: Optional[Path],
metadata: Dict[str, Any],
):
"""Upload summary to Notion if configured."""
try:
from services.notion_service import notion_service
title = base_name.replace("_", " ").title()
notion_metadata = {
"file_type": "Audio",
"pdf_path": pdf_path or Path(""),
"add_status": False,
"use_as_page": False,
}
page_id = notion_service.create_page_with_summary(
title=title, summary=summary, metadata=notion_metadata
)
if page_id:
metadata["notion_uploaded"] = True
metadata["notion_page_id"] = page_id
self.logger.info(f"✅ Uploaded to Notion: {title}")
else:
self.logger.warning(f"⚠️ Notion upload failed: {title}")
except Exception as e:
self.logger.warning(f"❌ Notion upload error: {e}")

BIN
kubectl Normal file

Binary file not shown.

View File

@@ -0,0 +1,447 @@
\documentclass[11pt,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage{amsmath,amssymb}
\usepackage{geometry}
\usepackage{graphicx}
\usepackage{tikz}
\usetikzlibrary{arrows.meta,positioning,shapes.geometric,calc}
\usepackage{booktabs}
\usepackage{enumitem}
\usepackage{fancyhdr}
\usepackage{titlesec}
\usepackage{tcolorbox}
\usepackage{array}
\usepackage{multirow}
\geometry{margin=2.5cm}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[L]{Economics - CBC}
\fancyhead[R]{Class: The Russian Revolution and the 1930s Crisis}
\fancyfoot[C]{\thepage}
% Boxes for highlighting content
\newtcolorbox{definicion}[1][]{
colback=blue!5!white,
colframe=blue!75!black,
fonttitle=\bfseries,
title=#1
}
\newtcolorbox{importante}[1][]{
colback=red!5!white,
colframe=red!75!black,
fonttitle=\bfseries,
title=#1
}
\newtcolorbox{ejemplo}[1][]{
colback=green!5!white,
colframe=green!50!black,
fonttitle=\bfseries,
title=#1
}
\title{\textbf{The Russian Revolution and the 1930s Crisis}\\
\large Economic Cycles, Socialism, and the Great Depression}
\author{CBC - UBA}
\date{\today}
\begin{document}
\maketitle
\tableofcontents
\newpage
\section{Introduction}
This class covers two of the most transformative economic processes of the twentieth century: the \textbf{Russian Revolution} and the \textbf{Great Depression of 1929}. Both events mark turning points in world economic history and gave rise to new forms of economic organization, as well as to economic theories that sought to explain and resolve capitalist crises.
The Russian Revolution unfolded in a tsarist empire in crisis, where Marxist ideas found fertile ground after years of marginalization of the industrial and rural proletariat. The 1930s Crisis, in turn, marked the collapse of the liberal economic model and the emergence of new interventionist theories.
\section{Historical Context: The Nineteenth Century and Revolutionary Ideas}
\subsection{The rise of the proletariat and Marxist ideas}
During the 1830s and 1840s a distinctive social phenomenon took shape: the \textbf{invention of the proletariat} by the middle bourgeoisie. The term refers to the industrial working class that emerged with the Industrial Revolution.
\begin{definicion}[Proletariat]
The proletariat is the social class that owns no means of production and must sell its labor power in order to survive. It emerged with industrialization and the concentration of property in the hands of the bourgeoisie.
\end{definicion}
According to Kovalevsky, \textit{"communism looms over Europe"} in this period. Workers began to raise the \textbf{red flag} instead of the French national flags, signaling their identification with Marxist ideology.
\subsection{Economic growth and the revolutionary pause}
The economic growth driven by the \textbf{Transport Revolution} (railways, steamships) temporarily put revolutionary ideas to rest. Economic expansion created jobs and improved living conditions, postponing social tensions.
\begin{importante}[Key idea]
Economic growth acts as a \textit{pause} for revolutionary ideas. It took the Great Depression for these ideas to resurface in force.
\end{importante}
\subsection{The dilemma of Marxism: social democracy vs. revolution}
As Marxist ideas developed, a fundamental dilemma arose:
\begin{itemize}
\item \textbf{Social democracy}: reach political power through the democratic system and use the State to create conditions of equality and redistribution.
\item \textbf{Proletarian revolution}: seizure of power by force by the working class in order to establish the dictatorship of the proletariat.
\end{itemize}
\section{Welfare States vs. Interventionist States}
It is essential to distinguish between two types of state intervention that emerged in this period:
\begin{table}[h]
\centering
\begin{tabular}{@{}p{0.45\textwidth}@{}p{0.45\textwidth}@{}}
\toprule
\textbf{Interventionist State} & \textbf{Welfare State} \\
\midrule
Protects national output against foreign competition & Protects workers and pursues the population's well-being \\
Imposes tariffs and trade barriers & Guarantees labor rights and social security \\
Promotes national industry & Provides health, education, and housing services \\
\bottomrule
\end{tabular}
\caption{Comparison between the Interventionist State and the Welfare State}
\end{table}
\subsection{Labor gains of the period}
The workers' struggles of this period won fundamental gains:
\begin{itemize}
\item \textbf{The 8-hour day}: 8 hours of work, 8 hours of rest, 8 hours of free time
\item \textbf{Child protection}: limits on child labor
\item \textbf{Maternity protection}: rights for pregnant women
\item \textbf{Working conditions}: improvements in safety and sanitation
\end{itemize}
\section{The Russian Revolution}
\subsection{Background: tsarist Russia in crisis}
Tsarist Russia had features that set it apart from the other European powers:
\begin{itemize}
\item \textbf{A predominantly agrarian economy}: 80\% of the economy was agricultural at the end of the nineteenth century
\item \textbf{Late and incomplete industrialization}: unlike the United States and Germany, Russia had not industrialized to any significant degree
\item \textbf{Agrarian crisis}: during the Great Depression there were widespread famines
\item \textbf{Participation in the First World War}: Russia performed poorly in the war
\end{itemize}
\begin{importante}[Fundamental contradiction]
Marxist theory predicted an industrial proletarian revolution, yet Russia was 80\% agricultural. This contradiction is central to understanding the later course of the revolution.
\end{importante}
\subsection{Marx's laws and the Russian situation}
Marx's laws seemed to be playing out in Russia, chained as follows:
\begin{enumerate}
\item \textbf{Falling prices} $\rightarrow$ \textbf{falling rate of profit}
\item \textbf{Falling rate of profit} $\rightarrow$ \textbf{falling production}
\item \textbf{Falling production} $\rightarrow$ \textbf{rising unemployment}
\item \textbf{Rising unemployment} $\rightarrow$ \textbf{social unrest} $\rightarrow$ \textbf{revolution}
\end{enumerate}
\subsection{The escape valve: migration}
In Europe, mass migration to the Americas acted as an escape valve that eased social tensions. Millions of people crossed the ocean to work, keeping political tensions from reaching critical levels.
\begin{ejemplo}[Italian migration]
Italian peasants traveled 15,000 kilometers to work the harvest in the Americas and return to Italy with savings. This migration prevented the demographic concentrations that would have fed social conflict.
\end{ejemplo}
\subsection{The revolutionary process}
\subsubsection{The fall of Tsar Nicholas II}
Tsar Nicholas II lost legitimacy in the eyes of society. The First World War worsened the economic problems and brought famine both to the cities and to the battlefront.
\subsubsection{The February Revolution of 1917}
Led by \textbf{Vladimir Lenin}, this initial revolution overthrew the tsar. There is, however, a fundamental contradiction: the revolution was supported mainly by the countryside, not by the industrial workers.
\begin{definicion}[The historical escape valve]
Mass migration to the Americas acted as an escape valve that eased social tensions in Europe. By allowing millions of people to find work in the New World, it kept political tensions from reaching critical levels.
\end{definicion}
\section{Three Stages of the Russian Revolution}
The Russian Revolution unfolded in three clearly distinct stages, each with its own economic policies.
\subsection{First Stage: War Communism (1918-1921)}
War Communism introduced radical measures of economic transformation:
\begin{enumerate}
\item \textbf{Expropriation of land}: the lands of the nobility and the great landowners were expropriated and handed over \textit{as private property} to the peasants who had supported the revolution.
\begin{importante}[First contradiction]
The socialist revolution created private owners in the countryside, contradicting the Marxist principle of abolishing private property.
\end{importante}
\item \textbf{Control of enterprises}: workers took control of the factories, expropriating the former owners.
\item \textbf{Creation of the Soviets}: councils of delegates from the different factories that decided on production, its destination, and productivity.
\item \textbf{A supreme economic council}: a central body directing internal trade, foreign trade, and commercial relations.
\item \textbf{Nationalization of banking}: the most important banks were nationalized.
\item \textbf{Repudiation of the foreign debt}: the Soviet Union repudiated the foreign debt contracted by the tsar.
\end{enumerate}
\subsubsection{Results of War Communism}
\begin{itemize}
\item \textbf{Falling agricultural productivity}: splitting the great estates into small plots reduced output
\item \textbf{Persistent famine}: the problem of feeding the cities continued
\item \textbf{State repression}: the State began to punish, repress, and requisition production
\item \textbf{Industrial stagnation}: with no capital to invest in agricultural machinery, heavy industry failed to develop
\end{itemize}
\subsection{Second Stage: The New Economic Policy - NEP (1921-1928)}
Faced with the failure of War Communism, the NEP was introduced under Lenin's leadership:
\begin{definicion}[NEP - New Economic Policy]
The NEP granted market freedom to the agricultural sector within a Marxist revolution. Peasants could sell their output on the free market, setting prices and destinations.
\end{definicion}
\textbf{Goals of the NEP:}
\begin{itemize}
\item Revive agricultural production
\item Generate surpluses to channel into industrialization
\item Create a virtuous circle: more agriculture $\rightarrow$ more demand for machinery $\rightarrow$ more industry $\rightarrow$ more employment $\rightarrow$ more consumption
\end{itemize}
\subsubsection{The contradiction of the NEP}
\begin{importante}[Giving up accelerated industrialization]
The NEP meant giving up accelerated industrialization, because it required waiting for the \textit{"trickle-down"} from the countryside. The transition from feudalism to capitalism had taken two centuries; the USSR could not wait that long.
\end{importante}
\subsubsection{The split in the Bolshevik party}
An internal division opened up:
\begin{table}[h]
\centering
\begin{tabular}{@{}p{0.45\textwidth}@{}p{0.45\textwidth}@{}}
\toprule
\textbf{Defenders of the NEP} & \textbf{Advocates of accelerated industrialization} \\
\midrule
Wait for the countryside to develop & Forced industrialization driven by the State \\
Spontaneous trickle-down of capital & Central planning \\
Stalin (after switching positions) & Trotsky \\
\bottomrule
\end{tabular}
\caption{Divisions within the Bolshevik party}
\end{table}
\subsection{Third Stage: Collectivization and Forced Industrialization (1928-1941)}
Under the leadership of \textbf{Joseph Stalin}, forced collectivization was carried out:
\begin{itemize}
\item \textbf{Collectivization of land}: private plots were merged into state farms (\textit{kolkhozes} and \textit{sovkhozes})
\item \textbf{Elimination of private property}: the peasant who owned a plot lost it to the State
\item \textbf{Heavy industrialization}: all resources were channeled into heavy industry
\end{itemize}
\begin{importante}[Human cost]
Collectivization caused millions of deaths. Added to the victims of the First World War, the Civil War, and the Second World War, the human cost of the Soviet transformation was enormous.
\end{importante}
\subsubsection{Results of collectivization}
\begin{itemize}
\item 75\% of internal trade in State hands
\item Banking 100\% state-owned
\item Only 3\% of the agricultural sector remained in private hands
\item While the West sank into the Great Depression, the USSR set out on the path of heavy industrialization
\end{itemize}
\section{The Great Depression of 1929}
\subsection{Context: the Roaring Twenties in the United States}
The United States emerged from World War I as:
\begin{itemize}
\item One of the great beneficiaries of the conflict
\item The largest exporter of manufactured goods
\item The new financial power (displacing England)
\end{itemize}
The 1920s were characterized by:
\begin{itemize}
\item Seemingly limitless economic growth
\item Rising real wages for the average worker
\item Expansion of consumption on credit
\item Stock-market speculation
\end{itemize}
\subsection{Causes of the crisis (not just the crash)}
It is essential to understand that \textbf{Black Thursday} (the stock-market crash) was not the cause but the trigger of a series of processes:
\subsubsection{Policies that inflated the bubble}
\begin{enumerate}
\item \textbf{Return to protectionism}: The United States went back to its traditional policy of protecting the domestic market with high tariffs, closing markets to European exports.
\item \textbf{Interest-rate increases}: The Federal Reserve raised rates, drawing capital from all over the world toward the United States.
\begin{ejemplo}[Capital flight]
Capital invested in Germany and Latin America was withdrawn en masse in search of higher returns in the United States, triggering economic crises in the periphery.
\end{ejemplo}
\item \textbf{Stock-market speculation}: Capital did not flow into real production (factories, jobs) but into speculation in shares.
\item \textbf{An economy fragile in its dependence on credit}: Both the US economy and the world economy relied excessively on credit.
\end{enumerate}
\subsection{The cycle of the crisis}
\begin{center}
\begin{tikzpicture}[
node distance=1.5cm,
auto,
block/.style={rectangle, draw, fill=blue!10, text width=6cm, text centered, rounded corners, minimum height=1cm},
arrow/.style={-Stealth, thick}
]
\node [block] (tasa) {Interest-rate increases in the US};
\node [block, below=of tasa] (fuga) {Capital flight from the periphery and Europe};
\node [block, below=of fuga] (crisis) {Economic crisis in Europe and Latin America};
\node [block, below=of crisis] (reduccion) {Reduced imports of manufactured goods};
\node [block, below=of reduccion] (sobreproduccion) {Overproduction in the US};
\node [block, below=of sobreproduccion] (desempleo) {Falling production $\rightarrow$ Unemployment};
\node [block, below=of desempleo, fill=red!10] (especulacion) {Stock-market speculation (capital detached from real production)};
\node [block, below=of especulacion, fill=red!20] (crash) {BLACK THURSDAY CRASH};
\draw [arrow] (tasa) -- (fuga);
\draw [arrow] (fuga) -- (crisis);
\draw [arrow] (crisis) -- (reduccion);
\draw [arrow] (reduccion) -- (sobreproduccion);
\draw [arrow] (sobreproduccion) -- (desempleo);
\draw [arrow] (desempleo) -- (especulacion);
\draw [arrow] (especulacion) -- (crash);
\end{tikzpicture}
\end{center}
\subsection{Consequences of the crash}
\subsubsection{Bank panic}
Distrust spread from the stock market to the banks:
\begin{enumerate}
\item Shareholders sold off en masse
\item Distrust spread to bank depositors
\item Savings were withdrawn massively (bank runs)
\item Banks did not have the funds to meet the withdrawals
\item More than half of all US banks failed
\end{enumerate}
\subsubsection{Real effects of the Great Depression}
\begin{itemize}
\item \textbf{Falling production}: Companies went bankrupt for lack of liquidity
\item \textbf{Mass unemployment}: Unemployment reached 25\%
\item \textbf{Collapse of consumption}: From the prosperity of the twenties to queuing for a plate of food
\item \textbf{Loss of savings}: Workers lost the savings they had deposited in failed banks
\end{itemize}
\begin{importante}[Crucial distinction]
It is essential to distinguish between the \textbf{explanations} of the crisis (the economic theories that emerged to account for it) and its \textbf{causes} (the concrete processes that produced it).
\end{importante}
\section{Comparison: Russian Revolution vs. Great Depression}
\begin{table}[h]
\centering
\small
\begin{tabular}{@{}p{0.45\textwidth}@{}p{0.45\textwidth}@{}}
\toprule
\textbf{Russian Revolution} & \textbf{Great Depression} \\
\midrule
Crisis in a backward agricultural economy & Crisis in an advanced industrial economy \\
Response: socialist revolution & Response: new interventionist economic theories \\
State takes control of the economy & State intervenes to correct the market \\
Centralized planning & Market retained, with regulation \\
Forced collectivization & Keynesianism and the New Deal \\
\bottomrule
\end{tabular}
\caption{Comparison of the two great crises of the twentieth century}
\end{table}
\section{Glossary of Technical Terms}
\begin{description}[style=multiline, leftmargin=3cm, font=\bfseries]
\item[Proletariat] The industrial working class, which owns no means of production and must sell its labor power.
\item[Social democracy] Political current that seeks power through democratic means in order to implement gradual social and economic reforms.
\item[Welfare State] A form of State that guarantees social services and protection to its citizens (health, education, social security).
\item[Interventionist State] A State that actively intervenes in the economy to protect national production and regulate the market.
\item[War Communism] First stage of the Russian Revolution, characterized by the expropriation of land and enterprises and the nationalization of banking.
\item[NEP] New Economic Policy implemented by Lenin, which granted market freedom to the agricultural sector in order to revive the economy.
\item[Collectivization] The process of merging private landholdings into state-run farms, eliminating private ownership of land.
\item[Kolkhozes] Collective farms in the Soviet Union where peasants worked the land cooperatively.
\item[Sovkhozes] State farms in the Soviet Union administered directly by the State.
\item[Soviets] Councils of worker and peasant delegates that emerged during the Russian Revolution.
\item[Marxist socialism] Economic system based on social ownership of the means of production and the abolition of private property.
\item[Keynesianism] Economic theory developed by John Maynard Keynes advocating State intervention to regulate economic cycles.
\item[New Deal] Set of policies implemented by Franklin D. Roosevelt to fight the Great Depression in the United States.
\item[Taylorism] System of scientific work organization developed by Frederick Taylor to maximize productive efficiency.
\item[Fordism] Assembly-line production system developed by Henry Ford, combining mass production with high wages to sustain mass consumption.
\end{description}
\section{Conclusions}
\subsection{Lessons of the Russian Revolution}
\begin{enumerate}
\item The Marxist revolution did not occur in an industrialized country but in an agrarian one, contradicting Marx's predictions.
\item The three stages (War Communism, NEP, Collectivization) show the tensions between theory and practice.
\item The human cost of forced industrialization was enormous.
\item The bureaucratization of the State captured the revolution, creating a new elite.
\end{enumerate}
\subsection{Lessons of the Great Depression}
\begin{enumerate}
\item The monetary and trade policies of a great power have global effects.
\item Financial speculation decoupled from real production generates unsustainable bubbles.
\item A credit-based economy is vulnerable to panics and runs.
\item The collapse of the financial system transmits rapidly to the real economy.
\end{enumerate}
\subsection{Impact on economic theory}
Both events led to the development of new economic theories:
\begin{itemize}
\item Centralized planning as an alternative to the market
\item Keynesianism and modern macroeconomics
\item The welfare state as a social stabilizer
\item Financial regulation as crisis prevention
\end{itemize}
\end{document}

1089
latex/imperio_romano.tex Normal file

File diff suppressed because it is too large Load Diff

134
list_notion_pages.py Normal file
View File

@@ -0,0 +1,134 @@
#!/usr/bin/env python3
"""
Script to list every Notion page and database accessible to the integration
"""
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent))

from config import settings
from notion_client import Client


def main():
    print("\n" + "=" * 70)
    print("📚 LISTING ALL PAGES AND DATABASES")
    print("=" * 70 + "\n")
    token = settings.NOTION_API_TOKEN
    client = Client(auth=token)
    try:
        # Search for every accessible item, with no filter
        print("🔍 Searching for all accessible pages...\n")
        results = client.search(page_size=100)
        all_items = results.get("results", [])
        # Split databases from pages
        databases = [item for item in all_items if item.get("object") == "database"]
        pages = [item for item in all_items if item.get("object") == "page"]
        print(
            f"✅ Found: {len(databases)} database(s) and {len(pages)} page(s)\n"
        )
        if databases:
            print("=" * 70)
            print("📊 DATABASES FOUND:")
            print("=" * 70)
            for i, db in enumerate(databases, 1):
                db_id = db.get("id", "N/A")
                title_list = db.get("title", [])
                title = (
                    title_list[0].get("plain_text", "Untitled")
                    if title_list
                    else "Untitled"
                )
                print(f"\n🔷 {i}. {title}")
                print(f"   ID: {db_id}")
                print(f"   URL: https://notion.so/{db_id.replace('-', '')}")
                # Show up to five properties
                props = db.get("properties", {})
                if props:
                    print("   Properties:")
                    for prop_name, prop_data in list(props.items())[:5]:
                        prop_type = prop_data.get("type", "unknown")
                        print(f"      - {prop_name} ({prop_type})")
                    if len(props) > 5:
                        print(f"      ... and {len(props) - 5} more")
            print("-" * 70)
        if pages:
            print("\n" + "=" * 70)
            print("📄 PAGES FOUND:")
            print("=" * 70)
            for i, page in enumerate(pages, 1):
                page_id = page.get("id", "N/A")
                # Try to get the title
                title = "Untitled"
                props = page.get("properties", {})
                # The title can live in different property slots
                if "title" in props:
                    title_prop = props["title"]
                    if "title" in title_prop:
                        title_list = title_prop["title"]
                        if title_list:
                            title = title_list[0].get("plain_text", "Untitled")
                elif "Name" in props:
                    name_prop = props["Name"]
                    if "title" in name_prop:
                        title_list = name_prop["title"]
                        if title_list:
                            title = title_list[0].get("plain_text", "Untitled")
                print(f"\n🔷 {i}. {title}")
                print(f"   ID: {page_id}")
                print(f"   URL: https://notion.so/{page_id.replace('-', '')}")
            print("-" * 70)
        if databases:
            print("\n" + "=" * 70)
            print("💡 NEXT STEP:")
            print("=" * 70)
            print("\nIf 'CBC' appears above as a DATABASE:")
            print("1. Copy the ID of the 'CBC' database")
            print("2. Update your .env:")
            print("   NOTION_DATABASE_ID=<the_full_id>")
            print("\nIf 'CBC' appears as a PAGE:")
            print("1. Open the page in Notion")
            print("2. Look for a database inside that page")
            print("3. Click the database's '...' menu")
            print("4. Select 'Copy link to view'")
            print("5. The ID will be in the copied URL")
            print("\nThen run: python test_notion_integration.py\n")
        else:
            print("\n⚠️ No accessible databases were found.")
            print("\n📋 OPTIONS:")
            print("\n1. Create a new database:")
            print("   - Open one of the pages listed above")
            print("   - Create a table/database inside it")
            print("   - Copy that database's ID")
            print("\n2. Or share an existing database:")
            print("   - Open your 'CBC' database in Notion")
            print("   - Click '...' > 'Connections'")
            print("   - Add your integration\n")
    except Exception as e:
        print(f"❌ Error: {e}\n")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    main()

602
main.py Normal file
View File

@@ -0,0 +1,602 @@
#!/usr/bin/env python3
"""
CBCFacil - Main Service Entry Point
Unified AI service for document processing (audio, PDF, text)
"""
import logging
import sys
import time
import fcntl
import os
import json
import threading
from pathlib import Path
from datetime import datetime
from typing import Optional, TextIO
# Load environment variables from .env file
from dotenv import load_dotenv
load_dotenv()
# Configure logging with JSON formatter for production
class JSONFormatter(logging.Formatter):
"""JSON formatter for structured logging in production"""
def format(self, record):
log_entry = {
"timestamp": datetime.utcnow().isoformat() + "Z",
"level": record.levelname,
"message": record.getMessage(),
"module": record.module,
"function": record.funcName,
"line": record.lineno,
}
# Add exception info if present
if record.exc_info:
log_entry["exception"] = self.formatException(record.exc_info)
return json.dumps(log_entry)
def setup_logging() -> logging.Logger:
"""Setup logging configuration"""
from config import settings
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(getattr(logging, settings.LOG_LEVEL.upper()))
# Remove existing handlers
logger.handlers.clear()
# Console handler
console_handler = logging.StreamHandler(sys.stdout)
if settings.is_production:
console_handler.setFormatter(JSONFormatter())
else:
console_handler.setFormatter(
logging.Formatter("%(asctime)s [%(levelname)s] - %(name)s - %(message)s")
)
logger.addHandler(console_handler)
# File handler if configured
if settings.LOG_FILE:
file_handler = logging.FileHandler(settings.LOG_FILE)
file_handler.setFormatter(JSONFormatter())
logger.addHandler(file_handler)
return logger
logger = setup_logging()
def acquire_lock() -> TextIO:
"""Acquire single instance lock"""
lock_file = (
Path(os.getenv("LOCAL_STATE_DIR", str(Path(__file__).parent)))
/ ".main_service.lock"
)
lock_file.parent.mkdir(parents=True, exist_ok=True)
lock_fd = open(lock_file, "w")
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
lock_fd.write(str(os.getpid()))
lock_fd.flush()
logger.info(f"Lock acquired. PID: {os.getpid()}")
return lock_fd
def release_lock(lock_fd) -> None:
"""Release lock"""
try:
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_UN)
lock_fd.close()
except Exception as e:
logger.warning(f"Could not release lock: {e}")
def validate_configuration() -> None:
"""Validate configuration at startup"""
from config.validators import validate_environment, ConfigurationError
try:
warnings = validate_environment()
if warnings:
logger.info(
f"Configuration validation completed with {len(warnings)} warnings"
)
except ConfigurationError as e:
logger.error(f"Configuration validation failed: {e}")
raise
def check_service_health() -> dict:
"""
Check health of all external services
Returns dict with health status
"""
from config import settings
from services.webdav_service import webdav_service
health_status = {
"timestamp": datetime.utcnow().isoformat(),
"status": "healthy",
"services": {},
}
# Check WebDAV
try:
if settings.has_webdav_config:
# Try a simple operation
webdav_service.list(".")
health_status["services"]["webdav"] = {"status": "healthy"}
else:
health_status["services"]["webdav"] = {"status": "not_configured"}
except Exception as e:
health_status["services"]["webdav"] = {"status": "unhealthy", "error": str(e)}
health_status["status"] = "degraded"
# Check Telegram
try:
from services.telegram_service import telegram_service
if telegram_service.is_configured:
health_status["services"]["telegram"] = {"status": "healthy"}
else:
health_status["services"]["telegram"] = {"status": "not_configured"}
except Exception as e:
health_status["services"]["telegram"] = {
"status": "unavailable",
"error": str(e),
}
# Check VRAM manager
try:
from services.vram_manager import vram_manager
vram_info = vram_manager.get_vram_info()
health_status["services"]["vram"] = {
"status": "healthy",
"available_gb": vram_info.get("free", 0) / (1024**3),
}
except Exception as e:
health_status["services"]["vram"] = {"status": "unavailable", "error": str(e)}
return health_status
def initialize_services() -> None:
"""Initialize all services with configuration validation"""
from config import settings
from services.webdav_service import webdav_service
from services.vram_manager import vram_manager
from services.telegram_service import telegram_service
from storage.processed_registry import processed_registry
logger.info("Initializing services...")
# Validate configuration
validate_configuration()
# Warn if WebDAV not configured
if not settings.has_webdav_config:
logger.warning("WebDAV not configured - file sync functionality disabled")
# Warn if AI providers not configured
if not settings.has_ai_config:
logger.warning("AI providers not configured - summary generation will not work")
# Configure Telegram if credentials available
if settings.TELEGRAM_TOKEN and settings.TELEGRAM_CHAT_ID:
try:
telegram_service.configure(
settings.TELEGRAM_TOKEN, settings.TELEGRAM_CHAT_ID
)
telegram_service.send_start_notification()
logger.info("Telegram notifications enabled")
except Exception as e:
logger.error(f"Failed to configure Telegram: {e}")
# Configure Notion if credentials available
if settings.has_notion_config:
try:
from services.notion_service import notion_service
notion_service.configure(
settings.NOTION_API_TOKEN, settings.NOTION_DATABASE_ID
)
logger.info("✅ Notion integration enabled")
except Exception as e:
logger.error(f"Failed to configure Notion: {e}")
else:
logger.info("Notion not configured - upload to Notion disabled")
# Initialize WebDAV if configured
if settings.has_webdav_config:
try:
webdav_service.initialize()
logger.info("WebDAV service initialized")
except Exception as e:
logger.error(f"Failed to initialize WebDAV: {e}")
logger.exception("WebDAV initialization error details")
else:
logger.info("Skipping WebDAV initialization (not configured)")
# Initialize VRAM manager
try:
vram_manager.initialize()
logger.info("VRAM manager initialized")
except Exception as e:
logger.error(f"Failed to initialize VRAM manager: {e}")
logger.exception("VRAM manager initialization error details")
# Initialize processed registry
try:
processed_registry.initialize()
logger.info("Processed registry initialized")
except Exception as e:
logger.error(f"Failed to initialize processed registry: {e}")
logger.exception("Registry initialization error details")
# Run health check
health = check_service_health()
logger.info(f"Initial health check: {json.dumps(health, indent=2)}")
logger.info("All services initialized successfully")
def send_error_notification(error_type: str, error_message: str) -> None:
"""Send error notification via Telegram"""
try:
from services.telegram_service import telegram_service
if telegram_service.is_configured:
telegram_service.send_error_notification(error_type, error_message)
except Exception as e:
logger.warning(f"Failed to send error notification: {e}")
def run_dashboard_thread() -> None:
"""Run Flask dashboard in a separate thread"""
try:
from api.routes import create_app
app = create_app()
# Run Flask in production mode with threaded=True
        app.run(
            host=os.getenv("DASHBOARD_HOST", "0.0.0.0"),
            port=int(os.getenv("DASHBOARD_PORT", "5000")),
            debug=False,
            threaded=True,
            use_reloader=False,  # Important: disable reloader in thread
        )
except Exception as e:
logger.error(f"Dashboard thread error: {e}")
logger.exception("Dashboard thread exception details")
def start_dashboard() -> threading.Thread:
"""Start dashboard in a background daemon thread"""
dashboard_port = int(os.getenv("DASHBOARD_PORT", "5000"))
logger.info(f"Starting dashboard on port {dashboard_port}...")
# Create daemon thread so it doesn't block shutdown
dashboard_thread = threading.Thread(
target=run_dashboard_thread, name="DashboardThread", daemon=True
)
dashboard_thread.start()
logger.info(f"Dashboard thread started (Thread-ID: {dashboard_thread.ident})")
return dashboard_thread
def run_main_loop() -> None:
"""Main processing loop with improved error handling"""
from config import settings
from services.webdav_service import webdav_service
from storage.processed_registry import processed_registry
from processors.audio_processor import AudioProcessor
from processors.pdf_processor import PDFProcessor
from processors.text_processor import TextProcessor
audio_processor = AudioProcessor()
pdf_processor = PDFProcessor()
text_processor = TextProcessor()
consecutive_errors = 0
max_consecutive_errors = 5
while True:
try:
logger.info("--- Polling for new files ---")
processed_registry.load()
# Process PDFs
if settings.has_webdav_config:
try:
webdav_service.mkdir(settings.REMOTE_PDF_FOLDER)
pdf_files = webdav_service.list(settings.REMOTE_PDF_FOLDER)
for file_path in pdf_files:
if file_path.lower().endswith(".pdf"):
if not processed_registry.is_processed(file_path):
from pathlib import Path
from urllib.parse import unquote
from services.telegram_service import telegram_service
local_filename = unquote(Path(file_path).name)
base_name = Path(local_filename).stem
local_path = (
settings.LOCAL_DOWNLOADS_PATH / local_filename
)
settings.LOCAL_DOWNLOADS_PATH.mkdir(
parents=True, exist_ok=True
)
# Step 1: Notify and download
                            telegram_service.send_message(
                                f"📄 New PDF detected: {local_filename}\n"
                                f"⬇️ Downloading..."
                            )
logger.info(
f"Downloading PDF: {file_path} -> {local_path}"
)
webdav_service.download(file_path, local_path)
# Step 2: Process PDF
                            telegram_service.send_message(
                                "🔍 Processing PDF with OCR..."
                            )
pdf_processor.process(str(local_path))
processed_registry.save(file_path)
except Exception as e:
logger.exception(f"Error processing PDFs: {e}")
send_error_notification("pdf_processing", str(e))
# Process Audio files
if settings.has_webdav_config:
try:
audio_files = webdav_service.list(settings.REMOTE_AUDIOS_FOLDER)
for file_path in audio_files:
if any(
file_path.lower().endswith(ext)
for ext in settings.AUDIO_EXTENSIONS
):
if not processed_registry.is_processed(file_path):
from pathlib import Path
from urllib.parse import unquote
from document.generators import DocumentGenerator
from services.telegram_service import telegram_service
local_filename = unquote(Path(file_path).name)
base_name = Path(local_filename).stem
local_path = (
settings.LOCAL_DOWNLOADS_PATH / local_filename
)
settings.LOCAL_DOWNLOADS_PATH.mkdir(
parents=True, exist_ok=True
)
# Step 1: Notify and download
                            telegram_service.send_message(
                                f"🎵 New audio detected: {local_filename}\n"
                                f"⬇️ Downloading..."
                            )
logger.info(
f"Downloading audio: {file_path} -> {local_path}"
)
webdav_service.download(file_path, local_path)
# Step 2: Transcribe
                            telegram_service.send_message(
                                "📝 Transcribing audio with Whisper..."
                            )
result = audio_processor.process(str(local_path))
if result.get("success") and result.get(
"transcription_path"
):
transcription_file = Path(
result["transcription_path"]
)
transcription_text = result.get("text", "")
# Step 3: Generate AI summary and documents
                                telegram_service.send_message(
                                    "🤖 Generating LaTeX academic summary..."
                                )
doc_generator = DocumentGenerator(
notification_callback=lambda msg: telegram_service.send_message(msg)
)
success, summary, output_files = (
doc_generator.generate_summary(
transcription_text, base_name
)
)
# Step 4: Upload all files to Nextcloud
if success and output_files:
# Create folders
for folder in [
settings.RESUMENES_FOLDER,
settings.DOCX_FOLDER,
]:
try:
webdav_service.makedirs(folder)
except Exception:
pass
# Upload all files in parallel using batch upload
upload_tasks = []
# Upload transcription TXT
if transcription_file.exists():
remote_txt = f"{settings.RESUMENES_FOLDER}/{transcription_file.name}"
upload_tasks.append((transcription_file, remote_txt))
# Upload DOCX
docx_path = Path(
output_files.get("docx_path", "")
)
if docx_path.exists():
remote_docx = f"{settings.DOCX_FOLDER}/{docx_path.name}"
upload_tasks.append((docx_path, remote_docx))
# Upload PDF
pdf_path = Path(
output_files.get("pdf_path", "")
)
if pdf_path.exists():
remote_pdf = f"{settings.DOCX_FOLDER}/{pdf_path.name}"
upload_tasks.append((pdf_path, remote_pdf))
# Upload Markdown
md_path = Path(
output_files.get("markdown_path", "")
)
if md_path.exists():
remote_md = f"{settings.RESUMENES_FOLDER}/{md_path.name}"
upload_tasks.append((md_path, remote_md))
# Execute parallel uploads
if upload_tasks:
upload_results = webdav_service.upload_batch(
upload_tasks, max_workers=4, timeout=120
)
logger.info(f"Parallel upload complete: {len(upload_results)} files")
# Final notification
                                    telegram_service.send_message(
                                        f"✅ Audio processed: {local_filename}\n"
                                        f"📄 DOCX: {docx_path.name if docx_path.exists() else 'N/A'}\n"
                                        f"📑 PDF: {pdf_path.name if pdf_path.exists() else 'N/A'}\n"
                                        f"☁️ Uploaded to Nextcloud"
                                    )
else:
# Just upload transcription if summary failed
if transcription_file.exists():
try:
webdav_service.makedirs(
settings.RESUMENES_FOLDER
)
except Exception:
pass
remote_txt = f"{settings.RESUMENES_FOLDER}/{transcription_file.name}"
webdav_service.upload(
transcription_file, remote_txt
)
                                    telegram_service.send_message(
                                        f"⚠️ Summary failed; only the transcription was uploaded:\n{transcription_file.name}"
                                    )
processed_registry.save(file_path)
except Exception as e:
logger.exception(f"Error processing audio: {e}")
send_error_notification("audio_processing", str(e))
# Process Text files
if settings.has_webdav_config:
try:
text_files = webdav_service.list(settings.REMOTE_TXT_FOLDER)
for file_path in text_files:
if any(
file_path.lower().endswith(ext)
for ext in settings.TXT_EXTENSIONS
):
if not processed_registry.is_processed(file_path):
text_processor.process(file_path)
processed_registry.save(file_path)
except Exception as e:
logger.exception(f"Error processing text: {e}")
send_error_notification("text_processing", str(e))
# Reset error counter on success
consecutive_errors = 0
except Exception as e:
# Improved error logging with full traceback
logger.exception(f"Critical error in main loop: {e}")
# Send notification for critical errors
send_error_notification("main_loop", str(e))
# Track consecutive errors
consecutive_errors += 1
if consecutive_errors >= max_consecutive_errors:
logger.critical(
f"Too many consecutive errors ({consecutive_errors}). "
"Service may be unstable. Consider checking configuration."
)
send_error_notification(
"consecutive_errors",
f"Service has failed {consecutive_errors} consecutive times",
)
# Don't exit, let the loop continue with backoff
logger.info(f"Waiting {settings.POLL_INTERVAL * 2} seconds before retry...")
time.sleep(settings.POLL_INTERVAL * 2)
continue
logger.info(f"Cycle completed. Waiting {settings.POLL_INTERVAL} seconds...")
time.sleep(settings.POLL_INTERVAL)
def main():
"""Main entry point"""
lock_fd = None
dashboard_thread = None
try:
logger.info("=== CBCFacil Service Started ===")
logger.info(f"Version: {os.getenv('APP_VERSION', '8.0')}")
logger.info(
f"Environment: {'production' if os.getenv('DEBUG', 'false').lower() != 'true' else 'development'}"
)
lock_fd = acquire_lock()
initialize_services()
# Start dashboard in background thread
dashboard_thread = start_dashboard()
# Run main processing loop
run_main_loop()
except KeyboardInterrupt:
logger.info("Shutdown requested by user")
except Exception as e:
logger.exception(f"Fatal error in main: {e}")
send_error_notification("fatal_error", str(e))
sys.exit(1)
finally:
if lock_fd:
release_lock(lock_fd)
logger.info("=== CBCFacil Service Stopped ===")
if __name__ == "__main__":
# Handle CLI commands
if len(sys.argv) > 1:
command = sys.argv[1]
        if command == "whisper" and len(sys.argv) >= 3:
            from processors.audio_processor import AudioProcessor
            AudioProcessor().process(sys.argv[2])
        elif command == "pdf" and len(sys.argv) >= 3:
            from processors.pdf_processor import PDFProcessor
            PDFProcessor().process(sys.argv[2])
        elif command == "health":
            # check_service_health is defined in this module; no self-import needed
            health = check_service_health()
            print(json.dumps(health, indent=2))
else:
print("Usage: python main.py [whisper|pdf|health]")
sys.exit(1)
else:
main()

148
main.py.backup Normal file
View File

@@ -0,0 +1,148 @@
#!/usr/bin/env python3
"""
CBCFacil - Main Service Entry Point
Unified AI service for document processing (audio, PDF, text)
"""
import logging
import sys
import time
import fcntl
import os
from pathlib import Path
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] - %(message)s"
)
logger = logging.getLogger(__name__)
def acquire_lock():
"""Acquire single instance lock"""
lock_file = Path(os.getenv("LOCAL_STATE_DIR", str(Path(__file__).parent))) / ".main_service.lock"
lock_file.parent.mkdir(parents=True, exist_ok=True)
lock_fd = open(lock_file, 'w')
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
lock_fd.write(str(os.getpid()))
lock_fd.flush()
logger.info(f"Lock acquired. PID: {os.getpid()}")
return lock_fd
def release_lock(lock_fd) -> None:
"""Release lock"""
try:
fcntl.flock(lock_fd.fileno(), fcntl.LOCK_UN)
lock_fd.close()
except Exception as e:
logger.warning(f"Could not release lock: {e}")
def initialize_services() -> None:
"""Initialize all services"""
from config import settings
from services.webdav_service import webdav_service
from services.vram_manager import vram_manager
from services.telegram_service import telegram_service
from storage.processed_registry import processed_registry
# Configure Telegram if credentials available
if settings.TELEGRAM_TOKEN and settings.TELEGRAM_CHAT_ID:
telegram_service.configure(settings.TELEGRAM_TOKEN, settings.TELEGRAM_CHAT_ID)
telegram_service.send_start_notification()
# Initialize WebDAV if configured
if settings.has_webdav_config:
webdav_service.initialize()
# Initialize VRAM manager
vram_manager.initialize()
# Initialize processed registry
processed_registry.initialize()
logger.info("All services initialized")
def run_main_loop() -> None:
"""Main processing loop"""
from config import settings
from services.webdav_service import webdav_service
from storage.processed_registry import processed_registry
from processors.audio_processor import AudioProcessor
from processors.pdf_processor import PDFProcessor
from processors.text_processor import TextProcessor
audio_processor = AudioProcessor()
pdf_processor = PDFProcessor()
text_processor = TextProcessor()
while True:
try:
logger.info("--- Polling for new files ---")
processed_registry.load()
# Process PDFs
if settings.has_webdav_config:
webdav_service.mkdir(settings.REMOTE_PDF_FOLDER)
pdf_files = webdav_service.list(settings.REMOTE_PDF_FOLDER)
for file_path in pdf_files:
if file_path.lower().endswith('.pdf'):
if not processed_registry.is_processed(file_path):
pdf_processor.process(file_path)
processed_registry.save(file_path)
# Process Audio files
if settings.has_webdav_config:
audio_files = webdav_service.list(settings.REMOTE_AUDIOS_FOLDER)
for file_path in audio_files:
if any(file_path.lower().endswith(ext) for ext in settings.AUDIO_EXTENSIONS):
if not processed_registry.is_processed(file_path):
audio_processor.process(file_path)
processed_registry.save(file_path)
# Process Text files
if settings.has_webdav_config:
text_files = webdav_service.list(settings.REMOTE_TXT_FOLDER)
for file_path in text_files:
if any(file_path.lower().endswith(ext) for ext in settings.TXT_EXTENSIONS):
if not processed_registry.is_processed(file_path):
text_processor.process(file_path)
processed_registry.save(file_path)
except Exception as e:
logger.error(f"Error in main loop: {e}")
logger.info(f"Cycle completed. Waiting {settings.POLL_INTERVAL} seconds...")
time.sleep(settings.POLL_INTERVAL)
def main():
"""Main entry point"""
lock_fd = acquire_lock()
try:
logger.info("=== CBCFacil Service Started ===")
initialize_services()
run_main_loop()
except KeyboardInterrupt:
logger.info("Shutdown requested")
finally:
release_lock(lock_fd)
if __name__ == "__main__":
# Handle CLI commands
if len(sys.argv) > 1:
command = sys.argv[1]
        if command == "whisper" and len(sys.argv) >= 3:
from processors.audio_processor import AudioProcessor
AudioProcessor().process(sys.argv[2])
        elif command == "pdf" and len(sys.argv) >= 3:
from processors.pdf_processor import PDFProcessor
PDFProcessor().process(sys.argv[2])
else:
print("Usage: python main.py [whisper|pdf]")
sys.exit(1)
else:
main()

15
processors/__init__.py Normal file
View File

@@ -0,0 +1,15 @@
"""
Processors package for CBCFacil
"""
from .base_processor import FileProcessor
from .audio_processor import AudioProcessor
from .pdf_processor import PDFProcessor
from .text_processor import TextProcessor
__all__ = [
'FileProcessor',
'AudioProcessor',
'PDFProcessor',
'TextProcessor'
]

93
processors/audio_processor.py Normal file
View File

@@ -0,0 +1,93 @@
"""
Audio file processor using Whisper
"""
import logging
from pathlib import Path
from typing import Dict, Any
from core import FileProcessingError
from config import settings
from services import vram_manager
from services.gpu_detector import gpu_detector
from .base_processor import FileProcessor
try:
import whisper
import torch
WHISPER_AVAILABLE = True
except ImportError:
WHISPER_AVAILABLE = False
class AudioProcessor(FileProcessor):
"""Processor for audio files using Whisper"""
def __init__(self):
super().__init__("AudioProcessor")
self.logger = logging.getLogger(__name__)
self._model = None
self._model_name = "medium" # Optimized for Spanish
def can_process(self, file_path: str) -> bool:
"""Check if file is an audio file"""
ext = self.get_file_extension(file_path)
return ext in settings.AUDIO_EXTENSIONS
def _load_model(self):
"""Load Whisper model lazily"""
if not WHISPER_AVAILABLE:
raise FileProcessingError("Whisper not installed")
if self._model is None:
device = gpu_detector.get_device()
self.logger.info(f"Loading Whisper model: {self._model_name} on {device}")
self._model = whisper.load_model(self._model_name, device=device)
vram_manager.update_usage()
def process(self, file_path: str) -> Dict[str, Any]:
"""Transcribe audio file"""
self.validate_file(file_path)
audio_path = Path(file_path)
output_path = settings.LOCAL_DOWNLOADS_PATH / f"{audio_path.stem}.txt"
self.logger.info(f"Processing audio file: {audio_path}")
try:
# Load model if needed
self._load_model()
# Update VRAM usage
vram_manager.update_usage()
            # Transcribe inside inference_mode for memory efficiency
            with torch.inference_mode():
                result = self._model.transcribe(
                    str(audio_path),
                    language="es",
                    # fp16 is only supported on GPU; fall back to fp32 on CPU
                    fp16=gpu_detector.is_available(),
verbose=False
)
# Save transcription
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(output_path, 'w', encoding='utf-8') as f:
f.write(result["text"])
self.logger.info(f"Transcription completed: {output_path}")
return {
"success": True,
"transcription_path": str(output_path),
"text": result["text"],
"model_used": self._model_name
}
except Exception as e:
self.logger.error(f"Audio processing failed: {e}")
raise FileProcessingError(f"Audio processing failed: {e}")
def cleanup(self) -> None:
"""Cleanup model"""
if self._model is not None:
del self._model
self._model = None
vram_manager.cleanup()

40
processors/base_processor.py Normal file
View File

@@ -0,0 +1,40 @@
"""
Base File Processor (Strategy Pattern)
"""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Any, Optional
from core import FileProcessingError
class FileProcessor(ABC):
"""Abstract base class for file processors"""
def __init__(self, name: str):
self.name = name
@abstractmethod
def can_process(self, file_path: str) -> bool:
"""Check if processor can handle this file type"""
pass
@abstractmethod
def process(self, file_path: str) -> Dict[str, Any]:
"""Process the file"""
pass
def get_file_extension(self, file_path: str) -> str:
"""Get file extension from path"""
return Path(file_path).suffix.lower()
def get_base_name(self, file_path: str) -> str:
"""Get base name without extension"""
return Path(file_path).stem
def validate_file(self, file_path: str) -> None:
"""Validate file exists and is accessible"""
path = Path(file_path)
if not path.exists():
raise FileProcessingError(f"File not found: {file_path}")
if not path.is_file():
raise FileProcessingError(f"Path is not a file: {file_path}")

164
processors/pdf_processor.py Normal file
View File

@@ -0,0 +1,164 @@
"""
PDF file processor with OCR
"""
import logging
from pathlib import Path
from typing import Dict, Any
from concurrent.futures import ThreadPoolExecutor, as_completed
from core import FileProcessingError
from config import settings
from services import vram_manager
from services.gpu_detector import gpu_detector
from .base_processor import FileProcessor
try:
import torch
import pytesseract
import easyocr
import cv2
import numpy as np
from pdf2image import convert_from_path
from PIL import Image
PDF_OCR_AVAILABLE = True
except ImportError:
PDF_OCR_AVAILABLE = False
# Provide stub for type hints
try:
from PIL import Image
except ImportError:
Image = None # type: ignore
class PDFProcessor(FileProcessor):
"""Processor for PDF files with OCR"""
def __init__(self):
super().__init__("PDFProcessor")
self.logger = logging.getLogger(__name__)
self._easyocr_reader = None
def can_process(self, file_path: str) -> bool:
"""Check if file is a PDF"""
return self.get_file_extension(file_path) == ".pdf"
def _load_easyocr(self):
"""Load EasyOCR reader"""
if self._easyocr_reader is None:
use_gpu = gpu_detector.is_available()
self.logger.info(f"Loading EasyOCR reader (GPU: {use_gpu})")
self._easyocr_reader = easyocr.Reader(['es'], gpu=use_gpu)
vram_manager.update_usage()
def _preprocess_image(self, image: Image.Image) -> Image.Image:
"""Preprocess image for better OCR"""
# Convert to grayscale
if image.mode != 'L':
image = image.convert('L')
# Simple preprocessing
image = image.resize((image.width * 2, image.height * 2), Image.Resampling.LANCZOS)
return image
def _run_ocr_parallel(self, pil_images) -> Dict[str, list]:
"""Run all OCR engines in parallel"""
results = {
'easyocr': [''] * len(pil_images),
'tesseract': [''] * len(pil_images)
}
        with ThreadPoolExecutor(max_workers=2) as executor:
            futures = {}
            # EasyOCR (expects numpy arrays rather than PIL images, so convert first)
            if self._easyocr_reader:
                np_images = [np.array(img) for img in pil_images]
                futures['easyocr'] = executor.submit(
                    self._easyocr_reader.readtext_batched,
                    np_images,
                    detail=0
                )
# Tesseract
futures['tesseract'] = executor.submit(
lambda imgs: [pytesseract.image_to_string(img, lang='spa') for img in imgs],
pil_images
)
# Collect results
for name, future in futures.items():
try:
results[name] = future.result()
except Exception as e:
self.logger.error(f"OCR engine {name} failed: {e}")
results[name] = [''] * len(pil_images)
return results
def process(self, file_path: str) -> Dict[str, Any]:
"""Process PDF with OCR"""
self.validate_file(file_path)
pdf_path = Path(file_path)
output_path = settings.LOCAL_DOWNLOADS_PATH / f"{pdf_path.stem}.txt"
if not PDF_OCR_AVAILABLE:
raise FileProcessingError("PDF OCR dependencies not installed")
self.logger.info(f"Processing PDF file: {pdf_path}")
try:
# Load EasyOCR if needed
self._load_easyocr()
vram_manager.update_usage()
# Convert PDF to images
self.logger.debug("Converting PDF to images")
pil_images = convert_from_path(
str(pdf_path),
dpi=settings.PDF_DPI,
fmt='png',
thread_count=settings.PDF_RENDER_THREAD_COUNT
)
# Process in batches
all_text = []
batch_size = settings.PDF_BATCH_SIZE
for i in range(0, len(pil_images), batch_size):
batch = pil_images[i:i + batch_size]
self.logger.debug(f"Processing batch {i//batch_size + 1}/{(len(pil_images) + batch_size - 1)//batch_size}")
# Preprocess images
preprocessed_batch = [self._preprocess_image(img) for img in batch]
# Run OCR in parallel
ocr_results = self._run_ocr_parallel(preprocessed_batch)
                # Combine results
                for j in range(len(batch)):
                    # Take the best result (simple approach: prefer EasyOCR, fall back to Tesseract)
                    text = ocr_results['easyocr'][j] or ocr_results['tesseract'][j]
                    if text:
                        all_text.append(text)
# Save combined text
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(output_path, 'w', encoding='utf-8') as f:
f.write("\n\n".join(all_text))
self.logger.info(f"PDF processing completed: {output_path}")
return {
"success": True,
"text_path": str(output_path),
"text": "\n\n".join(all_text),
"pages_processed": len(pil_images)
}
except Exception as e:
self.logger.error(f"PDF processing failed: {e}")
raise FileProcessingError(f"PDF processing failed: {e}")
def cleanup(self) -> None:
"""Cleanup OCR models"""
self._easyocr_reader = None
vram_manager.cleanup()

55
processors/text_processor.py Normal file
View File

@@ -0,0 +1,55 @@
"""
Text file processor
"""
import logging
from pathlib import Path
from typing import Dict, Any
from core import FileProcessingError
from config import settings
from .base_processor import FileProcessor
class TextProcessor(FileProcessor):
"""Processor for text files"""
def __init__(self):
super().__init__("TextProcessor")
self.logger = logging.getLogger(__name__)
def can_process(self, file_path: str) -> bool:
"""Check if file is a text file"""
ext = self.get_file_extension(file_path)
return ext in settings.TXT_EXTENSIONS
def process(self, file_path: str) -> Dict[str, Any]:
"""Process text file (copy to downloads)"""
self.validate_file(file_path)
text_path = Path(file_path)
output_path = settings.LOCAL_DOWNLOADS_PATH / text_path.name
self.logger.info(f"Processing text file: {text_path}")
        try:
            # Copy file to downloads directory (read once, reuse the content)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            content = text_path.read_text(encoding='utf-8')
            output_path.write_text(content, encoding='utf-8')
            self.logger.info(f"Text file processing completed: {output_path}")
            return {
                "success": True,
                "text_path": str(output_path),
                "text": content
            }
except Exception as e:
self.logger.error(f"Text processing failed: {e}")
raise FileProcessingError(f"Text processing failed: {e}")
def _read_file(self, file_path: Path) -> str:
"""Read file content"""
with open(file_path, 'r', encoding='utf-8') as f:
return f.read()

23
requirements-dev.txt Normal file
View File

@@ -0,0 +1,23 @@
# Development dependencies
pytest>=7.4.0
pytest-cov>=4.1.0
pytest-mock>=3.11.0
pytest-asyncio>=0.21.0
coverage>=7.3.0
# Code quality
black>=23.0.0
flake8>=6.0.0
mypy>=1.5.0
isort>=5.12.0
# Security
bandit>=1.7.5
safety>=2.3.0
# Performance testing
pytest-benchmark>=4.0.0
# Documentation
mkdocs>=1.5.0
mkdocs-material>=9.0.0

31
requirements.txt Executable file
View File

@@ -0,0 +1,31 @@
# Core web framework
Flask>=3.0.0
Flask-CORS>=4.0.0
# AI/ML dependencies
torch>=2.0.0
torchvision>=0.15.0
openai-whisper>=20231117
transformers>=4.30.0
easyocr>=1.7.0
# Image processing
Pillow>=10.0.0
opencv-python-headless>=4.8.0
# Document processing
pdf2image>=1.17.0
pypdf>=3.17.0
python-docx>=0.8.11
reportlab>=4.0.0
pytesseract>=0.3.10
# Utilities
numpy>=1.24.0
requests>=2.31.0
python-dotenv>=1.0.0
webdavclient3>=0.9.8
# Optional: for enhanced functionality
# unidecode>=1.3.7 # For filename normalization
# python-magic>=0.4.27 # For file type detection

10
restart_service.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Stop any existing service instance
pkill -f "python main.py"
sleep 2
# Restart with logs written to a visible file
cd /home/ren/proyectos/cbc
source .venv/bin/activate
python main.py >> main.log 2>&1 &
echo "Service restarted. Watch the logs with: tail -f main.log"

953
resumen_curiosidades.tex Normal file
View File

@@ -0,0 +1,953 @@
\documentclass[11pt,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage{amsmath,amssymb}
\usepackage{geometry}
\usepackage{graphicx}
\usepackage{tikz}
\usetikzlibrary{arrows.meta,positioning,shapes.geometric,calc,shapes.misc}
\usepackage{booktabs}
\usepackage{enumitem}
\usepackage{fancyhdr}
\usepackage{titlesec}
\usepackage{tcolorbox}
\usepackage{array}
\usepackage{multirow}
\usepackage{csquotes}
\usepackage{pgfplots}
\pgfplotsset{compat=1.18}
\geometry{margin=2.5cm}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[L]{Scientific and Cultural Curiosities}
\fancyhead[R]{An Interdisciplinary Compilation}
\fancyfoot[C]{\thepage}
% Boxes for highlighting content
\newtcolorbox{definicion}[1][]{
colback=blue!5!white,
colframe=blue!75!black,
fonttitle=\bfseries,
title=#1,
sharp corners=downhill
}
\newtcolorbox{importante}[1][]{
colback=red!5!white,
colframe=red!75!black,
fonttitle=\bfseries,
title=#1,
sharp corners=downhill
}
\newtcolorbox{ejemplo}[1][]{
colback=green!5!white,
colframe=green!50!black,
fonttitle=\bfseries,
title=#1,
sharp corners=downhill
}
\newtcolorbox{dato}[1][]{
colback=yellow!5!white,
colframe=orange!75!black,
fonttitle=\bfseries,
title=#1,
sharp corners=downhill
}
\newtcolorbox{formula}[1][]{
colback=purple!5!white,
colframe=purple!75!black,
fonttitle=\bfseries,
title=#1,
sharp corners=downhill
}
\title{\textbf{25 Things You Didn't Know Five Minutes Ago}\\[0.5cm]
\large{An interdisciplinary compendium of scientific curiosities,\\cultural phenomena, and technological advances}}
\author{Academic Compilation}
\date{\today}
\begin{document}
\maketitle
\thispagestyle{empty}
\tableofcontents
\newpage
\section{Introduction}
Human knowledge is characterized by its fragmented, specialized nature. Some of the most valuable insights, however, arise precisely from the \textbf{interconnection between apparently disparate disciplines}. This document compiles twenty-five curiosities ranging from French gastronomy to marine bioacoustics, from consumer psychology to materials engineering, showing that curiosity-driven learning is one of the most effective ways of acquiring interconnected knowledge.
\begin{importante}[Interdisciplinary Approach]
This document is organized to show connections between traditionally separate areas of knowledge. Each curiosity serves as an entry point for exploring deeper concepts in physics, biology, psychology, engineering, and culture.
\end{importante}
\section{Gastronomy and Food Culture}
\subsection{The French Hot Dog: Transcultural Culinary Adaptation}
\begin{definicion}[French Hot Dog]
A gastronomic variant of the traditional American hot dog that incorporates French baking techniques and distinctive preparation methods.
\end{definicion}
\textbf{Technical differences from the traditional hot dog:}
\begin{table}[h]
\centering
\begin{tabular}{@{}p{5cm}@{}p{5cm}@{}}
\toprule
\textbf{Traditional Hot Dog} & \textbf{French Hot Dog} \\ \midrule
Soft, elongated bun & Crusty baguette \\
Bread cut lengthwise & Central perforation of the bread \\
Sauces applied externally & Sauces injected internally \\
Cheddar or American cheese & Grated Emmental or Gruyère \\
Linear assembly & Tubular filling technique \\ \bottomrule
\end{tabular}
\caption{Technical comparison between the traditional and the French hot dog}
\end{table}
\textbf{Preparation process:}
\begin{enumerate}
\item \textbf{Bread selection}: Fresh baguette, crusty outside, soft inside
\item \textbf{Perforation}: A central tunnel is created using a specialized utensil
\item \textbf{Sauce injection}: Mayonnaise, Dijon mustard, or regional variations
\item \textbf{Sausage insertion}: Usually a Toulouse-style sausage or similar
\item \textbf{Cheese application}: Grated Emmental or Gruyère, partially melted
\end{enumerate}
\begin{ejemplo}[Cultural Context]
This dish illustrates the concept of \textit{glocalization} (the adaptation of global products to local preferences): the American hot dog concept hybridizes with the French baguette-baking tradition, creating a unique product that retains elements of both cultures.
\end{ejemplo}
\subsection{Heinz Tomato Ketchup Smoothie: Innovation and Controversy}
\begin{dato}[Heinz Tomato Ketchup Smoothie]
A drink developed by Heinz that combines ketchup with fruit ingredients, highlighting the botanical nature of the tomato as a fruit.
\end{dato}
\textbf{Composition and analysis:}
\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\textbf{Ingredient} & \textbf{Role in the Drink} \\ \midrule
Ketchup & Base; characteristic sweet-and-sour flavor \\
Açaí sorbet & Texture, antioxidants, purple color \\
Apple juice & Natural sweetness, liquid base \\
Strawberries & Complementary fruit flavor, color \\
Raspberries & Acidity, fruity notes, vitamins \\ \bottomrule
\end{tabular}
\end{table}
\begin{definicion}[Solanum lycopersicum]
Scientific name of the tomato, botanically classified as a berry (a fruit) although treated as a vegetable in cooking. This classificatory ambiguity allows the creation of products that defy traditional culinary categories.
\end{definicion}
\textbf{Analysis from marketing theory:}
This product represents a strategy of \textbf{differentiation through outrageousness}: creating products so unusual that they generate conversation and media coverage, turning the product's controversial nature into its main marketing feature.
\section{Biology, Health, and Medicine}
\subsection{Odontological Properties of Broccoli}
\begin{importante}[Antibacterial Property]
Regular consumption of broccoli contributes to reducing dental plaque by inhibiting \textit{Streptococcus mutans}, the bacterium primarily responsible for caries and periodontal disease.
\end{importante}
\textbf{Biochemical mechanism of action:}
\begin{itemize}
\item \textbf{Sulforaphane}: A sulfur compound with antibacterial properties
\item \textbf{Mechanical fiber}: Abrasive cleaning action during chewing
\item \textbf{Isothiocyanates}: Shift the oral pH toward environments less favorable to cariogenic bacteria
\item \textbf{Antioxidants}: Protection of the tooth enamel
\end{itemize}
\begin{definicion}[Streptococcus mutans]
A gram-positive, facultatively anaerobic bacterium considered the main etiological agent of dental caries in humans. Its ability to form biofilms (plaque) and to metabolize carbohydrates into lactic acid makes it particularly pathogenic to tooth structure.
\end{definicion}
\textbf{Plaque formation process:}
\begin{center}
\begin{tikzpicture}[
node distance=1.5cm,
process/.style={rectangle, draw, fill=blue!10, text width=3cm, align=center, rounded corners},
arrow/.style={-Stealth, thick}
]
\node[process] (bacterias) {Colonization by \textit{S. mutans}};
\node[process, right=of bacterias] (biofilm) {Biofilm formation};
\node[process, right=of biofilm] (acido) {Acid production};
\node[process, right=of acido, fill=red!10] (caries) {Enamel demineralization};
\draw[arrow] (bacterias) -- (biofilm);
\draw[arrow] (biofilm) -- (acido);
\draw[arrow] (acido) -- (caries);
\end{tikzpicture}
\end{center}
Broccoli interferes specifically at the colonization stage, reducing the ability of \textit{S. mutans} to adhere to the tooth surface and form cohesive biofilms.
\subsection{Mimicry in the Batfish}
\begin{definicion}[Batfish]
Fish of the family Ogcocephalidae whose juveniles develop passive camouflage imitating floating leaves, an evolutionary survival mechanism for the vulnerable stages of development.
\end{definicion}
\textbf{Characteristics of ontogenetic mimicry:}
\begin{table}[h]
\centering
\begin{tabular}{@{}p{5cm}@{}p{5cm}@{}}
\toprule
\textbf{Juvenile Stage} & \textbf{Adult Stage} \\ \midrule
Imitates a floating leaf & Typical batfish body form \\
Passive behavior & Active movement \\
Open water surface & Deep water \\
Predator evasion & Active predation \\
Cryptic coloration & Aposematic coloration \\ \bottomrule
\end{tabular}
\caption{Ontogenetic dimorphism in the batfish}
\end{table}
\begin{ejemplo}[Developmental Sequence]
\begin{enumerate}
\item \textbf{Hatching}: Initial planktonic larva
\item \textbf{Settlement}: Migration to the surface; adoption of the leaf form
\item \textbf{Crypsis}: Passive camouflage, drifting with the currents
\item \textbf{Metamorphosis}: Development of adult characteristics
\item \textbf{Transition}: Abandonment of the disguise; migration to depth
\item \textbf{Maturity}: Aposematic color pattern, benthic lifestyle
\end{enumerate}
\end{ejemplo}
\textbf{Evolutionary advantages of ontogenetic camouflage:}
\begin{itemize}
\item \textbf{Reduced predation}: Statistically significant in vulnerable stages
\item \textbf{Energy savings}: No investment in flight or combat required
\item \textbf{Approaching prey}: Allows closing in without being detected
\item \textbf{Resource optimization}: Energy directed to growth rather than active defense
\end{itemize}
\section{Psychology and Behavioral Economics}
\subsection{The Decoy Effect: Choice Architecture}
\begin{definicion}[Decoy Effect]
Cognitive bias in which the presence of a third, unattractive option (the decoy) systematically shifts preferences between two main options, steering the choice toward the option preferred by the seller.
\end{definicion}
\textbf{Mathematical formalization of the decoy effect:}
Let $A$, $B$, and $D$ be three options, where $D$ is a decoy designed to favor $B$ over $A$. The decoy effect occurs when:
\begin{equation}
P(B \mid \{A,B\}) < P(B \mid \{A,B,D\})
\end{equation}
where $P(B \mid \{A,B\})$ is the probability of choosing $B$ when only $A$ and $B$ are available, and $P(B \mid \{A,B,D\})$ is the probability of choosing $B$ when the decoy $D$ is also present.
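For intuition (the numbers here are illustrative assumptions, not experimental data): a decoy that lifts $B$'s choice share from $P(B \mid \{A,B\}) = 0.40$ to $P(B \mid \{A,B,D\}) = 0.60$, while $D$ itself is almost never chosen, satisfies the inequality; the decoy changes the outcome without ever being a serious option.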
\textbf{Paradigmatic example: movie-theater popcorn:}
\begin{table}[h]
\centering
\begin{tabular}{@{}ccc@{}}
\toprule
\textbf{Size} & \textbf{Price} & \textbf{Unit price} \\
\midrule
Small & \$4.00 & \$0.40/oz \\
Medium & \$6.50 & \$0.43/oz \\
Large & \$7.00 & \$0.28/oz \\
\bottomrule
\end{tabular}
\caption{Price structure with a built-in decoy}
\end{table}
\begin{importante}[Analysis of the Disparity]
The medium size works as a decoy because:
\begin{itemize}
\item Its unit price is WORSE than the small size's
\item It costs only \$0.50 less than the large
\item Almost nobody buys it (it is not meant to be consumed)
\item It makes the large look like a ``comparative bargain''
\end{itemize}
\end{importante}
\textbf{Cognitive foundations:}
\begin{enumerate}
\item \textbf{Relative comparison}: Humans evaluate options in context, not in absolute terms
\item \textbf{Loss aversion}: Failing to get the ``best value'' is perceived as a loss
\item \textbf{Anchoring}: The medium size serves as a reference that makes the large seem reasonable
\item \textbf{Heuristic simplification}: We choose the option that requires the least cognitive justification
\end{enumerate}
\begin{tikzpicture}[
node distance=2cm,
box/.style={rectangle, draw, minimum width=2cm, minimum height=1cm, align=center},
decoy/.style={rectangle, draw, dashed, minimum width=2cm, minimum height=1cm, align=center}
]
\node[box, align=center] (pequena) {Small\\\$4};
\node[decoy, right=1cm of pequena, align=center] (mediana) {Medium\\\$6.50\\(Decoy)};
\node[box, right=1cm of mediana, fill=green!20, align=center] (grande) {Large\\\$7\\(Target)};
\draw[->, thick, red] (mediana) to[bend left] node[above, font=\tiny] {makes attractive} (grande);
\draw[->, dashed, gray] (pequena) to[bend right] node[below, font=\tiny] {lower perceived value} (grande);
\end{tikzpicture}
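As a quick arithmetic check on the table above, the implied container sizes can be back-computed from the listed unit prices. The sizes below are assumptions consistent with those figures, not data from the original source:
\begin{formula}[Implied sizes (assumed)]
\begin{align*}
\text{Small:} &\quad \$4.00 \,/\, 10~\text{oz} = \$0.40/\text{oz} \\
\text{Medium:} &\quad \$6.50 \,/\, 15~\text{oz} \approx \$0.43/\text{oz} \\
\text{Large:} &\quad \$7.00 \,/\, 25~\text{oz} = \$0.28/\text{oz}
\end{align*}
Upgrading from medium to large then costs \$0.50 more (about 8\% more money) for 10 oz more popcorn (67\% more product), which is precisely the comparison the decoy is designed to make salient.
\end{formula}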
\subsection{Applications of the Decoy Effect}
\begin{itemize}
\item \textbf{Software subscriptions}: Free, Pro (decoy), Enterprise (target)
\item \textbf{Consumer electronics}: Mid-range models with specific features designed to push buyers toward the premium tier
\item \textbf{Restaurant menus}: Extremely expensive dishes that make the others look reasonable
\item \textbf{Airline tickets}: Fare classes configured to encourage a particular choice
\item \textbf{Public policy}: Framing of options to steer public opinion
\end{itemize}
\section{Ingeniería y Tecnología}
\subsection{Cubos de Basura Inteligentes: Visión Artificial Aplicada}
\begin{definicion}[Sistema de Predicción de Trayectoria]
Sistema cibernético que integra visión computacional, aprendizaje automático y actuación mecánica para anticipar la trayectoria de objetos en movimiento y posicionarse óptimamente para su recepción.
\end{definicion}
\textbf{Arquitectura del sistema HTX Studio:}
\begin{center}
\begin{tikzpicture}[
sensor/.style={circle, draw, fill=yellow!20, minimum size=1cm},
process/.style={rectangle, draw, fill=blue!10, minimum width=2cm},
actuator/.style={rectangle, draw, fill=green!10, minimum width=2cm},
arrow/.style={-Stealth, thick}
]
\node[sensor] (camera) {Cámara};
\node[process, right=1.5cm of camera, align=center] (vision) {Visión\\Computacional};
\node[process, right=1cm of vision, align=center] (ml) {Machine\\Learning};
\node[process, right=1cm of ml, align=center] (predict) {Predicción\\Trayectoria};
\node[actuator, right=1cm of predict] (motor) {Motor};
\node[below=0.5cm of motor] (trash) {Cubo};
\draw[arrow] (camera) -- (vision);
\draw[arrow] (vision) -- (ml);
\draw[arrow] (ml) -- (predict);
\draw[arrow] (predict) -- (motor);
\draw[arrow] (motor) -- (trash);
\end{tikzpicture}
\end{center}
\textbf{Especificaciones técnicas:}
\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\textbf{Componente} & \textbf{Especificación} \\ \midrule
Sensores & Cámaras de alta velocidad (60+ fps) \\
Procesamiento & GPU integrada para inferencia en tiempo real \\
Algoritmo & Red neuronal convolucional para detección \\
Actuación & Motores DC con encoder de posición \\
Latencia & <100 ms de detección a movimiento \\
Precisión & >90\% en condiciones normales \\ \bottomrule
\end{tabular}
\end{table}
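Un modelo mínimo de la etapa de predicción (esquema ilustrativo, asumiendo caída libre sin arrastre aerodinámico): a partir de la posición y velocidad estimadas por visión, el sistema extrapola el punto de aterrizaje y ordena el desplazamiento del cubo.
\begin{formula}[Predicción Balística Simplificada]
\begin{equation}
y(t) = y_0 + v_{0y}\,t - \frac{1}{2}gt^2, \qquad x_{aterrizaje} = x_0 + v_{0x}\,t^*
\end{equation}
Donde $t^*$ es la raíz positiva de $y(t^*) = y_{cubo}$. Como el vuelo de un objeto lanzado dura del orden de un segundo, el requisito de latencia $<100$ ms deja al motor la mayor parte de la ventana para posicionarse en $x_{aterrizaje}$.
\end{formula}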
\subsection{Realidad Aumentada Automotriz: Sistema Xpeng}
\begin{dato}[Emotional AR Driving]
El fabricante chino Xpeng implementa realidad aumentada para comunicación emocional entre conductores, permitiendo proyección de emojis virtuales hacia otros vehículos.
\end{dato}
\textbf{Objetivos de diseño:}
\begin{itemize}
\item \textbf{Canalización de agresión}: Alternativa no física a gestos agresivos
\item \textbf{Seguridad vial}: Reducción de confrontaciones físicas
\item \textbf{Expresión emocional}: Válvula de escape para frustración
\item \textbf{Diferenciación de marca}: Característica distintiva en mercado saturado
\end{itemize}
\textbf{Implementación técnica:}
\begin{itemize}
\item \textbf{Hardware}: Proyectores HUD (Head-Up Display) en parabrisas
\item \textbf{Software}: Sistema de selección gestual o por comandos de voz
\item \textbf{Renderizado}: Emojis superpuestos a visión del mundo real
\item \textbf{Calibración}: Ajuste automático según distancia del vehículo objetivo
\end{itemize}
\subsection{Repulsión Magnética: Campaña Mercedes-Benz}
\begin{ejemplo}[Brilliant Marketing Campaign]
Mercedes-Benz desarrolló carritos de juguete con imanes de repulsión ocultos para demostrar su tecnología de prevención de colisiones, creando vehículos ``incolisionables'' que paradójicamente fueron rechazados por el público infantil.
\end{ejemplo}
\textbf{Principio físico implementado:}
\begin{formula}[Ley de Repulsión Magnética]
\begin{equation}
F = \frac{\mu q_1 q_2}{4\pi r^2}
\end{equation}
Donde $F$ es la fuerza de repulsión, $\mu$ la permeabilidad magnética, $q_1$ y $q_2$ las cargas magnéticas (polos), y $r$ la distancia entre imanes.
\end{formula}
\textbf{Paradoja de aceptación:}
\begin{itemize}
\item \textbf{Objetivo publicitario}: Demostrar tecnología de prevención de colisiones
\item \textbf{Resultado técnico}: Carritos efectivamente incapaces de colisionar
\item \textbf{Recepción infantil}: Rechazo universal (eliminaba la diversión principal: chocar autos)
\item \textbf{Resultado publicitario}: Campaña viral exitosa a pesar de falla comercial del producto
\end{itemize}
Este caso ilustra la tensión entre \textit{seguridad} y \textit{ludicidad}, mostrando que en productos recreativos, la prevención del comportamiento ``peligroso'' puede eliminar el valor principal del producto.
\subsection{Sensor Láser de Privacidad Computacional}
\begin{definicion}[Sistema de Seguridad por Hilo Láser]
Dispositivo de seguridad informática que utiliza un haz láser invisible como sensor perimetral; cuando el haz es interrumpido por aproximación, el sistema oculta automáticamente el contenido de pantalla.
\end{definicion}
\textbf{Especificaciones técnicas:}
\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\textbf{Parámetro} & \textbf{Especificación} \\ \midrule
Fuente láser & Diodo láser clase 1 (seguro para ojos) \\
Longitud de onda & 650 nm (rojo; apenas perceptible a baja potencia) \\
Rango de detección & 0.5-2 metros \\
Latencia de respuesta & <50 ms \\
Integración OS & Windows/macOS/Linux \\ \bottomrule
\end{tabular}
\end{table}
\textbf{Aplicaciones:}
\begin{itemize}
\item Oficinas con tráfico peatonal constante
\item Espacios de trabajo compartido
\item Ambientes donde se maneja información sensible
\item Prevención de visualización accidental de contenido confidencial
\end{itemize}
\section{Ciencia de Materiales}
\subsection{Resistencia de Piezas Lego: Efecto del Pigmento}
\begin{importante}[Variación por Color]
La resistencia mecánica de las piezas de Lego varía significativamente según el pigmento utilizado, con diferencias de hasta 80 kg en capacidad de carga entre el amarillo (550 kg) y el blanco (630 kg).
\end{importante}
\textbf{Datos de resistencia por color:}
\begin{table}[h]
\centering
\begin{tabular}{@{}lcc@{}}
\toprule
\textbf{Color} & \textbf{Resistencia (kg)} & \textbf{Variación vs. Base} \\ \midrule
Amarillo & 550 & -12.7\% \\
Negro & 560 & -11.1\% \\
Azul & 565 & -10.3\% \\
Rojo & 600 & -4.8\% \\
Blanco & \textbf{630} & \textbf{Referencia (+0\%)} \\ \bottomrule
\end{tabular}
\caption{Capacidad de carga según pigmento (Datos experimentales)}
\end{table}
\begin{dato}[Explicación Científica]
\begin{itemize}
\item \textbf{Pigmentos intensos} (azul): Introducen imperfecciones cristalinas en el polímero ABS
\item \textbf{Pigmento blanco} (dióxido de titanio): Actúa como filler reforzante
\item \textbf{Mecanismo}: Dióxido de titanio se integra a matriz polimérica aumentando densidad de entrecruzamiento
\item \textbf{Consecuencia}: Piezas blancas consistentemente más resistentes que las de otros colores
\end{itemize}
\end{dato}
\textbf{Control de calidad Lego:}
\begin{itemize}
\item \textbf{Tasa de defectos}: 18 piezas por millón (0.0018\%)
\item \textbf{Tolerancia dimensional}: $\pm$0.002 mm (2 micras)
\item \textbf{Materiales}: Moldes de acero inoxidable endurecido
\item \textbf{Vida útil de molde}: $\sim$1 millón de ciclos antes de reemplazo
\end{itemize}
\subsection{Resortera Gigante: Mecánica de Proyectiles}
\textbf{Análisis físico del invento de Mike Shake:}
\begin{formula}[Energía Potencial Elástica]
\begin{equation}
E_p = \frac{1}{2}kx^2
\end{equation}
Donde $E_p$ es la energía potencial almacenada, $k$ la constante elástica del material, y $x$ la distancia de estiramiento.
\end{formula}
\begin{formula}[Energía Cinética de Proyectil]
\begin{equation}
E_c = \frac{1}{2}mv^2
\end{equation}
Donde $E_c$ es la energía cinética, $m$ la masa del proyectil (bola de acero), y $v$ la velocidad de salida.
\end{formula}
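Ejemplo numérico con valores puramente ilustrativos (no son especificaciones reales del dispositivo): para $k = 2000$ N/m y un estiramiento de $x = 2$ m,
\begin{equation}
E_p = \frac{1}{2}(2000)(2)^2 = 4000 \text{ J} \quad \Rightarrow \quad v = \sqrt{\frac{2E_p}{m}} = \sqrt{\frac{2 \times 4000}{0.5}} \approx 126 \text{ m/s}
\end{equation}
asumiendo una bola de acero de 0.5 kg y transferencia perfecta de energía; en la práctica, las pérdidas elásticas y aerodinámicas reducen esta cifra.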
\textbf{Sistema de manivela de tensión:}
\begin{itemize}
\item \textbf{Ventaja mecánica}: Relación de transmisión >10:1
\item \textbf{Acumulación gradual}: Energía aplicada en múltiples rotaciones
\item \textbf{Seguridad}: Reducción de riesgo de retroceso
\item \textbf{Precisión}: Control exacto del grado de tensión
\end{itemize}
\section{Acústica y Bioacústica}
\subsection{Propagación del Sonido en Diferentes Medios}
\begin{definicion}[Atenuación Sonora]
Pérdida de intensidad del sonido a medida que se propaga a través de un medio, dependiente de las propiedades físicas del medio y de la frecuencia de la onda sonora.
\end{definicion}
\textbf{Comparación de alcances máximos:}
\begin{table}[h]
\centering
\begin{tabular}{@{}lcccc@{}}
\toprule
\textbf{Medio} & \textbf{Densidad} & \textbf{Animal} & \textbf{Frecuencia} & \textbf{Alcance} \\ \midrule
Aire & $1.225\text{ kg/m}^3$ & Lobo & 300-2000 Hz & 16 km \\
Agua & $1000\text{ kg/m}^3$ & Ballena azul & 10-40 Hz & 1600+ km \\
\bottomrule
\end{tabular}
\end{table}
\begin{importante}[Explicación Física]
\begin{itemize}
\item \textbf{Densidad del medio}: Agua $\sim$800 veces más densa que el aire
\item \textbf{Absorción}: Menor en agua para frecuencias bajas
\item \textbf{Canal SOFAR}: Canal de sonido profundo en océano que guía ondas sonoras
\item \textbf{Infrasonidos}: Frecuencias <20 Hz viajan miles de kilómetros con atenuación mínima
\end{itemize}
\end{importante}
\textbf{Análisis comparativo de atenuación:}
\begin{formula}[Coeficiente de Atenuación]
\begin{equation}
\alpha \propto \frac{f^2}{\rho c^3}
\end{equation}
Donde $\alpha$ es el coeficiente de atenuación, $f$ la frecuencia, $\rho$ la densidad del medio, y $c$ la velocidad del sonido. La menor atenuación en agua explica que el alcance de las comunicaciones de ballenas sea unas 100 veces mayor.
\end{formula}
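Sustituyendo valores típicos ($\rho_{aire} \approx 1.225$ kg/m$^3$, $c_{aire} \approx 343$ m/s; $\rho_{agua} \approx 1000$ kg/m$^3$, $c_{agua} \approx 1480$ m/s) para una misma frecuencia, como estimación de orden de magnitud:
\begin{equation}
\frac{\alpha_{agua}}{\alpha_{aire}} = \frac{\rho_{aire}\,c_{aire}^3}{\rho_{agua}\,c_{agua}^3} = \frac{1.225 \times 343^3}{1000 \times 1480^3} \approx 1.5 \times 10^{-5}
\end{equation}
Es decir, a igual frecuencia la atenuación en agua resulta unos cinco órdenes de magnitud menor; el uso de infrasonidos (factor $f^2$) amplía todavía más la ventaja de las ballenas.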
\subsection{Comunicación de Ballenas: Infrasonidos Oceánicos}
\begin{dato}[Ballena Azul]
Emite infrasonidos profundos (10-40 Hz) que pueden recorrer más de 1600 kilómetros bajo el agua, permitiendo comunicación entre individuos en cuencas oceánicas completas.
\end{dato}
\textbf{Adaptaciones evolutivas para comunicación de largo alcance:}
\begin{itemize}
\item \textbf{Frecuencias bajas}: Menor atenuación en agua
\item \textbf{Alta intensidad}: Hasta 188 dB (referenciado a 1 $\mu$Pa a 1 m)
\item \textbf{Repetición}: Patrones repetitivos para aumentar probabilidad de detección
\item \textbf{Canal SOFAR}: Aprovechamiento de canal acústico natural
\end{itemize}
\section{Eventos Culturales y Fenómenos Sociales}
\subsection{Festival de Lanzamiento de Vehículos: Glacier View, Alaska}
\begin{definicion}[Festival de Lanzamiento de Vehículos]
Evento anual en Glacier View, Alaska, donde vehículos son lanzados desde un acantilado de 90 metros hacia un estanque como forma de entretenimiento comunitario y expresión cultural.
\end{definicion}
\textbf{Especificaciones del evento:}
\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\textbf{Parámetro} & \textbf{Valor} \\ \midrule
Ubicación & Glacier View, Alaska ($62^\circ$N $145^\circ$W) \\
Altura del acantilado & 90 metros (295 pies) \\
Elevación del estanque & $\sim$400 msnm \\
Temporada & Verano (junio-agosto) \\
Asistentes & 200-500 espectadores \\
Vehículos por evento & 10-30 unidades \\ \bottomrule
\end{tabular}
\end{table}
\begin{formula}[Tiempo de Caída Libre]
\begin{equation}
t = \sqrt{\frac{2h}{g}} = \sqrt{\frac{2 \times 90}{9.81}} \approx 4.28 \text{ segundos}
\end{equation}
\begin{equation}
v_{impacto} = gt = 9.81 \times 4.28 \approx 42 \text{ m/s} \approx 150 \text{ km/h}
\end{equation}
\end{formula}
\textbf{Aspectos sociológicos:}
\begin{itemize}
\item \textbf{Identidad comunitaria}: Evento distintivo que define a la comunidad
\item \textbf{Relación con tecnología}: Celebración irónica de cultura automotriz
\item \textbf{Arte efímero}: Decoración de vehículos como expresión artística temporal
\item \textbf{Economía local}: Atracción turística que genera ingresos para comunidad pequeña
\end{itemize}
\subsection{Gastronomía Espectáculo: Restaurante con Tirolesa}
\textbf{Sistema de entrega por tirolesa en Bangkok:}
\begin{itemize}
\item \textbf{Mecanismo}: Sistema de cables con gravedad asistida
\item \textbf{Seguridad}: Arnés de triple punto de anclaje
\item \textbf{Presentación}: Platos en contenedores especiales anti-volteo
\item \textbf{Experiencia}: Cada orden es performance en sí misma
\end{itemize}
\begin{ejemplo}[Diferenciación Experiencial]
En el saturado mercado gastronómico de Bangkok, la tirolesa no es solo un método de entrega sino el \textit{producto principal}: los clientes pagan tanto por el espectáculo de recibir su comida ``volando'' como por la comida misma.
\end{ejemplo}
\section{Arte y Expresión Creativa}
\subsection{Vodan Valsikov: Arquitectura Capilar}
\begin{dato}[Vodan Valsikov]
Barbero ucraniano viralizado por crear patrones geométricos complejos e ilusiones ópticas mediante corte de cabello, transformando la cabeza en lienzo artístico.
\end{dato}
\textbf{Técnicas empleadas:}
\begin{itemize}
\item \textbf{Geometría euclidiana}: Patrones basados en figuras regulares
\item \textbf{Perspectiva}: Uso de profundidad para crear ilusiones 3D
\item \textbf{Gradiente}: Variaciones de longitud para efecto de sombreado
\item \textbf{Simetría}: Patrones bilateral y radialmente simétricos
\end{itemize}
\subsection{Ghost Pittur: Arte Inverso Urbano}
\begin{definicion}[Arte Inverso / Reverse Graffiti]
Práctica artística que consiste en crear limpiando superficies sucias en lugar de aplicar pigmento; el ``arte'' emerge por sustracción de suciedad en lugar de por adición de material.
\end{definicion}
\textbf{Metodología de Ghost Pittur:}
\begin{enumerate}
\item \textbf{Reconocimiento}: Identificación de superficies vandalizadas
\item \textbf{Planificación}: Diseño del patrón de limpieza
\item \textbf{Ejecución}: Limpieza selectiva (generalmente con lavadora a presión)
\item \textbf{Revelación}: El patrón emerge por contraste entre superficie limpia y sucia
\item \textbf{Temporalidad}: Obra efímera que eventualmente será vandalizada nuevamente
\end{enumerate}
\textbf{Aspectos legales y filosóficos:}
\begin{itemize}
\item \textbf{Ambigüedad legal}: ¿Es vandalismo revertir vandalismo?
\item \textbf{Consentimiento}: Generalmente realizado sin permiso del propietario
\item \textbf{Valor estético}: ¿Es arte si no hay adición sino sustracción?
\item \textbf{Impermanencia}: Aceptación de la naturaleza temporal del trabajo
\end{itemize}
\subsection{Wang Liang: El Hombre Invisible}
\begin{dato}[Camuflaje Humano]
Artista chino especializado en pintura corporal que le permite fundirse completamente con entornos naturales y urbanos, documentando el performance mediante fotografía.
\end{dato}
\textbf{Proceso de creación:}
\begin{enumerate}
\item \textbf{Selección del entorno}: Identificación de fondo con potencial para camuflaje
\item \textbf{Documentación del fondo}: Fotografía de alta resolución del entorno
\item \textbf{Preparación de modelo}: Aplicación de pintura corporal base
\item \textbf{Pintura detallada}: Reproducción meticulosa de patrones del entorno
\item \textbf{Posicionamiento}: Alineación precisa del modelo con el fondo
\item \textbf{Documentación final}: Fotografía del performance terminado
\end{enumerate}
\textbf{Temas explorados:}
\begin{itemize}
\item \textbf{Identidad}: Disolución del yo en el entorno
\item \textbf{Observación}: Límites entre percepción y realidad
\item \textbf{Naturaleza vs. Cultura}: Integración de humano con entorno natural
\item \textbf{Anonimato}: Desaparición de individualidad en espacios masivos
\end{itemize}
\section{Cultura Digital y Fenómenos de Redes Sociales}
\subsection{Récord de YouTube: Avril Lavigne}
\begin{dato}[Primer Video en 100 Millones]
\textit{"Girlfriend"} de Avril Lavigne fue el primer video en YouTube en alcanzar 100 millones de reproducciones, marcando un hito en la era del contenido viral y estableciendo nuevas métricas de éxito en música digital.
\end{dato}
\textbf{Contexto histórico:}
\begin{itemize}
\item \textbf{Año}: 2007 (época temprana de YouTube)
\item \textbf{Plataforma}: YouTube fundado en 2005, en fase de expansión
\item \textbf{Industria musical}: Transición de MTV a YouTube como plataforma principal
\item \textbf{Métricas}: Establecimiento de vistas como medida de éxito
\end{itemize}
\subsection{Nombres Tecnológicos: ChatGPT Bastidas Guerra}
\begin{importante}[Caso ChatGPT]
Bebé registrada oficialmente en Cereté, Córdoba, Colombia, con el nombre ``ChatGPT Bastidas Guerra'', reflejando la penetración cultural de sistemas de inteligencia artificial en prácticas de nominación (naming practices).
\end{importante}
\textbf{Implicaciones sociológicas:}
\begin{enumerate}
\item \textbf{Digitalización de identidad}: Sistema de IA integrado a documento legal de identidad
\item \textbf{Memoria cultural}: Nombre preserva momento específico de adopción tecnológica
\item \textbf{Identidad anticipada}: Niña cargará marca comercial específica como nombre personal
\item \textbf{Potencial estigma}: Riesgo de asociación con tecnología que puede volverse obsoleta o controversial
\end{enumerate}
\textbf{Contexto legal:}
\begin{itemize}
\item \textbf{Ley colombiana}: Nombres deben respetar dignidad del niño
\item \textbf{Interpretación}: ¿Es ``ChatGPT'' un nombre válido bajo estándares de dignidad?
\item \textbf{Precedente}: Casos anteriores de nombres no tradicionales en Colombia
\end{itemize}
\subsection{Memoria Animal: Ariel la Ninfa}
\begin{definicion}[Memoria Auditiva Animal]
Capacidad cognitiva de procesar, almacenar y reproducir patrones sonoros complejos, documentada en diversas especies aviares y mamíferas.
\end{definicion}
\textbf{Caso Ariel -- Ninfa memoriza tono de iPhone:}
\begin{itemize}
\item \textbf{Especie}: \textit{Nymphicus hollandicus} (ninfa o carolina, pequeña cacatúa australiana)
\item \textbf{Habilidad}: Memorización y reproducción de melodía específica
\item \textbf{Mecanismo}: Aprendizaje vocal por imitación
\item \textbf{Significado}: Demostración de memoria auditiva episódica en aves
\end{itemize}
\section{Nomenclatura y Coincidencias}
\subsection{HTX: Dualidad de Denominación}
\textbf{Coincidencia onomástica:}
\begin{itemize}
\item \textbf{HTX Studio}: Equipo de ingeniería con sede en China, desarrollador de cubos de basura inteligentes
\item \textbf{Comunidad HTX}: Referencia a comunidad de Glacier View, Alaska
\item \textbf{Similitud}: Ambas entidades utilizan sigla ``HTX''
\item \textbf{Diferencia}: No existe relación conocida entre las entidades
\end{itemize}
Este caso ilustra el fenómeno de \textbf{convergencia onomástica} --diferentes entidades que desarrollan independientemente nombres idénticos o similares--, relativamente común en un contexto de globalización y de convenciones de nombres abreviados.
\section{Análisis Demográfico}
\subsection{Distribución de Fechas de Nacimiento}
\begin{dato}[Patrones Estacionales]
Existen variaciones estadísticamente significativas en la frecuencia de nacimientos a lo largo del año, con ciertas fechas siendo considerablemente más comunes o raras que otras.
\end{dato}
\textbf{Factores que influyen en distribución:}
\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\textbf{Factor} & \textbf{Efecto} \\ \midrule
Concepción estacional & Picos en nacimientos 9 meses después \\
Condiciones climáticas & Menos concepciones en temperaturas extremas \\
Planificación cultural & Evitación de fechas festivas \\
Intervención médica & Cesáreas programadas en horarios laborales \\
Festividades & Aumento de concepciones durante vacaciones \\ \bottomrule
\end{tabular}
\end{table}
\textbf{Fechas estadísticamente extremas (EE.UU.):}
\begin{itemize}
\item \textbf{Más común}: 16 de septiembre (concepción en época navideña)
\item \textbf{Más rara}: 25 de diciembre (evitación de partos en Navidad)
\item \textbf{Segunda más común}: 9 de septiembre
\item \textbf{Segunda más rara}: 1 de enero (evitación de Año Nuevo)
\end{itemize}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
width=12cm,
height=4cm,
xlabel={Mes},
ylabel={Nacimientos (relativo a media)},
xtick={1,2,3,4,5,6,7,8,9,10,11,12},
xticklabels={Ene,Feb,Mar,Abr,May,Jun,Jul,Ago,Sep,Oct,Nov,Dic},
ymin=0.8, ymax=1.2,
grid=major,
]
\addplot[smooth, blue, thick] coordinates {
(1, 0.95) (2, 0.92) (3, 0.98) (4, 0.99) (5, 1.01) (6, 1.02)
(7, 1.03) (8, 1.05) (9, 1.12) (10, 1.08) (11, 1.02) (12, 0.85)
};
\end{axis}
\end{tikzpicture}
\end{center}
\section{Síntesis y Conclusiones}
\subsection{Temas Transversales Identificados}
La compilación de estas veinticinco curiosidades revela varios hilos conductores que conectan fenómenos aparentemente dispares:
\textbf{1. Adaptación y Evolución}
\begin{itemize}
\item Adaptación biológica: camuflaje del pez murciélago
\item Adaptación cultural: hot dog francés como variación local de producto global
\item Adaptación tecnológica: sistemas que aprenden y responden (cubos de basura inteligentes)
\end{itemize}
\textbf{2. Percepción y Realidad}
\begin{itemize}
\item Psicología: efecto señuelo manipula percepción de valor
\item Arte: Wang Liang y Ghost Pittur juegan con límites de percepción visual
\item Tecnología: realidad aumentada modifica percepción del entorno
\end{itemize}
\textbf{3. Optimización de Recursos}
\begin{itemize}
\item Biológica: pez murciélago ahorra energía mediante camuflaje pasivo
\item Industrial: Lego optimiza tolerancias para máxima compatibilidad
\item Económica: empresas diseñan arquitecturas de choice para maximizar ganancias
\end{itemize}
\textbf{4. Expresión y Creatividad}
\begin{itemize}
\item Gastronómica: innovaciones como hot dog francés o ketchup smoothie
\item Artística: Vodan Valsikov, Ghost Pittur, Wang Liang
\item Tecnológica: diseño de productos que incorporan creatividad (carritos incolisionables)
\end{itemize}
\subsection{Valor del Conocimiento Interconectado}
Este ejercicio de compilación demuestra que:
\begin{enumerate}
\item \textbf{Curiosidad como motor de aprendizaje}: Puntos de entrada triviales pueden llevar a comprensión profunda de conceptos complejos
\item \textbf{Interdisciplinariedad}: Fenómenos en un área (biología) pueden iluminar la comprensión en otra (ingeniería)
\item \textbf{Contextualización}: Datos aislados adquieren significado cuando se conectan con patrones más amplios
\item \textbf{Serendipia}: Encuentros fortuitos entre conceptos aparentemente no relacionados generan nuevas intuiciones
\end{enumerate}
\begin{importante}[Conclusión Principal]
Las ``cosas que no sabías hace cinco minutos'' --aunque aparentemente triviales-- funcionan como \textit{portales de entrada} a una comprensión más profunda de principios científicos, fenómenos culturales y patrones de comportamiento. Cada curiosidad es un nodo en una red de conocimiento, con conexiones que se extienden en múltiples direcciones hacia áreas del saber humano tradicionalmente separadas.
\end{importante}
\section*{Glosario}
\begin{description}[style=multiline, leftmargin=3.5cm, font=\bfseries]
\item[ABS] \textit{Acrylonitrile Butadiene Styrene}. Termoplástico utilizado en piezas Lego por su resistencia, durabilidad y precisión de moldeo.
\item[Atenuación sonora] Pérdida de intensidad de una onda sonora a medida que se propaga a través de un medio, dependiente de frecuencia y propiedades del medio.
\item[Açaí] Fruta de la palmera \textit{Euterpe oleracea}, nativa de región amazónica, rica en antioxidantes y antocianinas.
\item[Biofilm] Comunidad microbiana adherida a superficie, encapsulada en matriz extracelular polimérica; en contexto dental, ``placa bacteriana''.
\item[Canal SOFAR] \textit{Sound Fixing and Ranging Channel}. Capa oceánica donde la velocidad del sonido alcanza su mínimo, creando una guía de ondas natural para comunicación de larga distancia.
\item[Camuflaje] Adaptación que permite organismo mezclarse visualmente con su entorno, evitando detección por depredadores o presas.
\item[Choice architecture] Diseño del entorno en que las personas toman decisiones, para influir en elección sin restringir opciones (concepto de Thaler y Sunstein).
\item[Decoy effect] Ver ``Efecto señuelo''.
\item[Definición operacional] Definición de concepto en términos de procedimientos o mediciones específicas, permitiendo su replicación experimental.
\item[Dióxido de titanio] Pigmento blanco (TiO$_2$) utilizado en plásticos; actúa como filler reforzante aumentando resistencia mecánica.
\item[Efecto señuelo] Sesgo cognitivo donde una opción poco atractiva (señuelo) modifica preferencias entre dos opciones principales.
\item[Emmental] Queso de origen suizo, caracterizado por agujeros formados por burbujas de CO$_2$ durante la fermentación.
\item[Encapsulamiento] En programación orientada a objetos, ocultación de detalles de implementación exponiendo solo interfaz pública.
\item[Episodio memorial] Memoria de evento específico contextualizado en tiempo y espacio, con detalle fenomenológico.
\item[Frame] En teoría de decisiones, marco conceptual que influencia cómo opciones son percibidas y evaluadas.
\item[Glocalización] Estrategia de adaptar productos globales a preferencias locales, manteniendo elementos de identidad global.
\item[Gruyère] Queso suizo similar a emmental, utilizado en cocina francesa por sus características de fusión.
\item[Heuristic] Atajo mental simplificador que reduce carga cognitiva en toma de decisiones.
\item[HTX Studio] Equipo de ingeniería y tecnología con base en China, desarrollador de sistemas de cubos de basura inteligentes.
\item[Infrasonido] Sonido de frecuencia inferior a 20 Hz, por debajo del umbral auditivo humano pero detectable por algunos animales.
\item[Mechanics] Rama de física que estudia movimiento y fuerzas; en ingeniería, aplicación de principios mecánicos a diseño de sistemas.
\item[Mimetismo] Capacidad de organismo para imitar apariencia de otro objeto u organismo, obteniendo ventaja evolutiva.
\item[Machine Learning] Subcampo de inteligencia artificial enfocado en desarrollo de algoritmos que aprenden patrones a partir de datos.
\item[Neural network] Arquitectura computacional inspirada en redes neuronales biológicas, utilizada para reconocimiento de patrones.
\item[Ontogenia] Desarrollo de organismo individual desde fertilización hasta madurez.
\item[Placa dental] Biofilm adherido a superficie dental, compuesto por bacterias (principalmente \textit{Streptococcus mutans}), restos alimenticios y polímeros bacterianos.
\item[Repulsión magnética] Fuerza entre polos magnéticos iguales que causa alejamiento mutuo, descrita por ley de Coulomb magnética.
\item[Reverse graffiti] Ver ``Arte inverso''.
\item[Serendipia] Hallazgo fortuito o afortunado de algo valioso no buscado originalmente.
\item[Streptococcus mutans] Bacteria grampositiva, anaerobia facultativa, principal agente etiológico de caries dental en humanos.
\item[Sulforafano] Compuesto organosulfurado presente en vegetales crucíferos (brócoli, coliflor), con propiedades antibacterianas y antioxidantes.
\item[Tirolesa] Sistema de transporte consistente en cable tendido entre dos puntos, por el cual se desplaza persona o vehículo suspendido.
\item[Xpeng] Fabricante chino de vehículos eléctricos, conocido por integrar tecnología avanzada de realidad aumentada en automóviles.
\end{description}
\section*{Referencias}
\noindent La información presentada en este documento ha sido compilada de diversas fuentes incluyendo documentación científica, reportajes de medios, y observaciones de fenómenos culturales contemporáneos. Los datos específicos sobre resistencia de piezas Lego, características del efecto señuelo, y propiedades del brócoli están respaldados por literatura científica y técnica en las respectivas áreas.
\noindent Para mayor profundización en los temas presentados, se recomienda consultar:
\begin{itemize}
\item Literatura especializada en ciencia de materiales para análisis de pigmentos en polímeros
\item Investigaciones en economía conductual para estudio del efecto señuelo
\item Textos de bioacústica para comunicación de cetáceos
\item Documentación etológica para mimetismo en peces y otros organismos
\item Fuentes de sociología y antropología para análisis de fenómenos culturales contemporáneos
\end{itemize}
\end{document}

17
services/__init__.py Normal file
View File

@@ -0,0 +1,17 @@
"""
Services package for CBCFacil
"""
from .webdav_service import WebDAVService, webdav_service
from .vram_manager import VRAMManager, vram_manager
from .telegram_service import TelegramService, telegram_service
from .gpu_detector import GPUDetector, GPUType, gpu_detector
from .ai import ai_service
__all__ = [
'WebDAVService', 'webdav_service',
'VRAMManager', 'vram_manager',
'TelegramService', 'telegram_service',
'GPUDetector', 'GPUType', 'gpu_detector',
'ai_service'
]

20
services/ai/__init__.py Normal file
View File

@@ -0,0 +1,20 @@
"""
AI Providers package for CBCFacil
"""
from .base_provider import AIProvider
from .claude_provider import ClaudeProvider
from .gemini_provider import GeminiProvider
from .provider_factory import AIProviderFactory, ai_provider_factory
# Alias for backwards compatibility
ai_service = ai_provider_factory
__all__ = [
'AIProvider',
'ClaudeProvider',
'GeminiProvider',
'AIProviderFactory',
'ai_provider_factory',
'ai_service'
]

45
services/ai/base_provider.py Normal file
View File

@@ -0,0 +1,45 @@
"""
Base AI Provider interface (Strategy pattern)
"""
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
class AIProvider(ABC):
"""Abstract base class for AI providers"""
@abstractmethod
def summarize(self, text: str, **kwargs) -> str:
"""Generate summary of text"""
pass
@abstractmethod
def correct_text(self, text: str, **kwargs) -> str:
"""Correct grammar and spelling in text"""
pass
@abstractmethod
def classify_content(self, text: str, **kwargs) -> Dict[str, Any]:
"""Classify content into categories"""
pass
@abstractmethod
def generate_text(self, prompt: str, **kwargs) -> str:
"""Generate text from prompt"""
pass
@abstractmethod
def fix_latex(self, latex_code: str, error_log: str, **kwargs) -> str:
"""Fix broken LaTeX code based on compiler error log"""
pass
@abstractmethod
def is_available(self) -> bool:
"""Check if provider is available and configured"""
pass
@property
@abstractmethod
def name(self) -> str:
"""Provider name"""
pass

158
services/ai/claude_provider.py Normal file
View File

@@ -0,0 +1,158 @@
"""
Claude AI Provider implementation
"""
import logging
import subprocess
import shutil
from typing import Dict, Any, Optional
from config import settings
from core import AIProcessingError
from .base_provider import AIProvider
class ClaudeProvider(AIProvider):
"""Claude AI provider using CLI"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._cli_path = settings.CLAUDE_CLI_PATH or shutil.which("claude")
self._token = settings.ZAI_AUTH_TOKEN
self._base_url = settings.ZAI_BASE_URL
@property
def name(self) -> str:
return "Claude"
def is_available(self) -> bool:
"""Check if Claude CLI is available"""
return bool(self._cli_path and self._token)
def _get_env(self) -> Dict[str, str]:
"""Get environment variables for Claude"""
# Load all user environment variables first
import os
env = os.environ.copy()
# Override with our specific settings if available
if self._token:
env["ANTHROPIC_AUTH_TOKEN"] = self._token
if self._base_url:
env["ANTHROPIC_BASE_URL"] = self._base_url
# Add critical flags
env["PYTHONUNBUFFERED"] = "1"
# Ensure model variables are picked up from env (already in os.environ)
# but if we had explicit settings for them, we'd set them here.
# Since we put them in .env and loaded via load_dotenv -> os.environ,
# simply copying os.environ is sufficient.
return env
def _run_cli(self, prompt: str, timeout: int = 600) -> str:
"""Run Claude CLI with prompt using -p flag for stdin input"""
if not self.is_available():
raise AIProcessingError("Claude CLI not available or not configured")
try:
# Use -p flag to read prompt from stdin, --dangerously-skip-permissions for automation
cmd = [self._cli_path, "--dangerously-skip-permissions", "-p", "-"]
process = subprocess.run(
cmd,
input=prompt,
env=self._get_env(),
text=True,
capture_output=True,
timeout=timeout,
shell=False,
)
if process.returncode != 0:
error_msg = process.stderr or "Unknown error"
raise AIProcessingError(f"Claude CLI failed: {error_msg}")
return process.stdout.strip()
except subprocess.TimeoutExpired:
raise AIProcessingError(f"Claude CLI timed out after {timeout}s")
except Exception as e:
raise AIProcessingError(f"Claude CLI error: {e}")
def summarize(self, text: str, **kwargs) -> str:
"""Generate summary using Claude"""
prompt = f"""Summarize the following text:
{text}
Provide a clear, concise summary in Spanish."""
return self._run_cli(prompt)
def correct_text(self, text: str, **kwargs) -> str:
"""Correct text using Claude"""
prompt = f"""Correct the following text for grammar, spelling, and clarity:
{text}
Return only the corrected text, nothing else."""
return self._run_cli(prompt)
def classify_content(self, text: str, **kwargs) -> Dict[str, Any]:
"""Classify content using Claude"""
categories = [
"historia",
"analisis_contable",
"instituciones_gobierno",
"otras_clases",
]
prompt = f"""Classify the following text into one of these categories:
- historia
- analisis_contable
- instituciones_gobierno
- otras_clases
Text: {text}
Return only the category name, nothing else."""
result = self._run_cli(prompt).lower()
# Validate result
if result not in categories:
result = "otras_clases"
return {"category": result, "confidence": 0.9, "provider": self.name}
def generate_text(self, prompt: str, **kwargs) -> str:
"""Generate text using Claude"""
return self._run_cli(prompt)
def fix_latex(self, latex_code: str, error_log: str, **kwargs) -> str:
"""Fix broken LaTeX code using Claude"""
prompt = f"""I have a LaTeX file that failed to compile. Please fix the code.
COMPILER ERROR LOG:
{error_log[-3000:]}
BROKEN LATEX CODE:
{latex_code}
INSTRUCTIONS:
1. Analyze the error log to find the specific syntax error.
2. Fix the LaTeX code.
3. Return ONLY the full corrected LaTeX code.
4. Do not include markdown blocks or explanations.
5. Start immediately with \\documentclass.
COMMON LATEX ERRORS TO CHECK:
- TikZ nodes with line breaks (\\\\) MUST have "align=center" in their style.
WRONG: \\node[box] (n) {{Text\\\\More}};
CORRECT: \\node[box, align=center] (n) {{Text\\\\More}};
- All \\begin{{env}} must have matching \\end{{env}}
- All braces {{ }} must be balanced
- Math mode $ must be paired
- Special characters need escaping: % & # _
- tcolorbox environments need proper titles: [Title] not {{Title}}
"""
return self._run_cli(prompt, timeout=180)

337
services/ai/gemini_provider.py Normal file
View File

@@ -0,0 +1,337 @@
"""
Gemini AI Provider - Optimized version with rate limiting and retry
"""
import logging
import subprocess
import shutil
import requests
import time
from typing import Dict, Any, Optional
from datetime import datetime, timedelta
from config import settings
from core import AIProcessingError
from .base_provider import AIProvider
class TokenBucket:
"""Token bucket rate limiter"""
def __init__(self, rate: float = 10, capacity: int = 20):
self.rate = rate # tokens per second
self.capacity = capacity
self.tokens = capacity
self.last_update = time.time()
self._lock = None # Lazy initialization
def _get_lock(self):
if self._lock is None:
import threading
self._lock = threading.Lock()
return self._lock
def acquire(self, tokens: int = 1) -> float:
with self._get_lock():
now = time.time()
elapsed = now - self.last_update
self.last_update = now
self.tokens = min(self.capacity, self.tokens + elapsed * self.rate)
if self.tokens >= tokens:
self.tokens -= tokens
return 0.0
wait_time = (tokens - self.tokens) / self.rate
self.tokens = 0
return wait_time
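# Hypothetical usage sketch (values are illustrative, not project defaults):
#   limiter = TokenBucket(rate=15, capacity=30)
#   wait = limiter.acquire()
#   if wait > 0:
#       time.sleep(wait)  # caller pays the shortfall before issuing the request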
class CircuitBreaker:
"""Circuit breaker for API calls"""
def __init__(self, failure_threshold: int = 5, recovery_timeout: int = 60):
self.failure_threshold = failure_threshold
self.recovery_timeout = recovery_timeout
self.failures = 0
self.last_failure: Optional[datetime] = None
self.state = "closed" # closed, open, half-open
self._lock = None
def _get_lock(self):
if self._lock is None:
import threading
self._lock = threading.Lock()
return self._lock
def call(self, func, *args, **kwargs):
with self._get_lock():
if self.state == "open":
if (
self.last_failure
and (datetime.utcnow() - self.last_failure).total_seconds()
> self.recovery_timeout
):
self.state = "half-open"
else:
raise AIProcessingError("Circuit breaker is open")
try:
result = func(*args, **kwargs)
if self.state == "half-open":
self.state = "closed"
self.failures = 0
return result
except Exception as e:
self.failures += 1
self.last_failure = datetime.utcnow()
if self.failures >= self.failure_threshold:
self.state = "open"
raise
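# Hypothetical usage sketch (illustrative only): wrap any flaky callable so repeated
# failures open the breaker and short-circuit further calls until recovery_timeout:
#   breaker = CircuitBreaker(failure_threshold=5, recovery_timeout=60)
#   response = breaker.call(requests.get, "https://example.com", timeout=10)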
class GeminiProvider(AIProvider):
"""Gemini AI provider with rate limiting and retry"""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(__name__)
self._cli_path = settings.GEMINI_CLI_PATH or shutil.which("gemini")
self._api_key = settings.GEMINI_API_KEY
self._flash_model = settings.GEMINI_FLASH_MODEL
self._pro_model = settings.GEMINI_PRO_MODEL
self._session = None
self._rate_limiter = TokenBucket(rate=15, capacity=30)
self._circuit_breaker = CircuitBreaker(failure_threshold=5, recovery_timeout=60)
self._retry_config = {
"max_attempts": 3,
"base_delay": 1.0,
"max_delay": 30.0,
"exponential_base": 2,
}
@property
def name(self) -> str:
return "Gemini"
def is_available(self) -> bool:
"""Check if Gemini CLI or API is available"""
return bool(self._cli_path or self._api_key)
def _init_session(self) -> None:
"""Initialize HTTP session with connection pooling"""
if self._session is None:
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=10,
pool_maxsize=20,
max_retries=0, # We handle retries manually
)
self._session.mount("https://", adapter)
def _run_with_retry(self, func, *args, **kwargs):
"""Execute function with exponential backoff retry"""
max_attempts = self._retry_config["max_attempts"]
base_delay = self._retry_config["base_delay"]
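# With the defaults (3 attempts, base 1.0s), failed calls sleep ~1s then ~2s
# (plus up to 10% jitter) before the final attempt.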
last_exception = None
for attempt in range(max_attempts):
try:
return self._circuit_breaker.call(func, *args, **kwargs)
except requests.exceptions.RequestException as e:
last_exception = e
if attempt < max_attempts - 1:
delay = min(
base_delay * (2**attempt), self._retry_config["max_delay"]
)
# Add jitter
delay += delay * 0.1 * (time.time() % 1)
self.logger.warning(
f"Attempt {attempt + 1} failed: {e}, retrying in {delay:.2f}s"
)
time.sleep(delay)
raise AIProcessingError(f"Max retries exceeded: {last_exception}")
def _run_cli(self, prompt: str, use_flash: bool = True, timeout: int = 300) -> str:
"""Run Gemini CLI with prompt"""
if not self._cli_path:
raise AIProcessingError("Gemini CLI not available")
model = self._flash_model if use_flash else self._pro_model
cmd = [self._cli_path, model, prompt]
try:
# Apply rate limiting
wait_time = self._rate_limiter.acquire()
if wait_time > 0:
time.sleep(wait_time)
process = subprocess.run(
cmd, text=True, capture_output=True, timeout=timeout, shell=False
)
if process.returncode != 0:
error_msg = process.stderr or "Unknown error"
raise AIProcessingError(f"Gemini CLI failed: {error_msg}")
return process.stdout.strip()
except subprocess.TimeoutExpired:
raise AIProcessingError(f"Gemini CLI timed out after {timeout}s")
except Exception as e:
raise AIProcessingError(f"Gemini CLI error: {e}")
def _call_api(self, prompt: str, use_flash: bool = True, timeout: int = 180) -> str:
"""Call Gemini API with rate limiting and retry"""
if not self._api_key:
raise AIProcessingError("Gemini API key not configured")
self._init_session()
model = self._flash_model if use_flash else self._pro_model
url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
payload = {"contents": [{"parts": [{"text": prompt}]}]}
params = {"key": self._api_key}
def api_call():
# Apply rate limiting
wait_time = self._rate_limiter.acquire()
if wait_time > 0:
time.sleep(wait_time)
response = self._session.post(
url, json=payload, params=params, timeout=timeout
)
response.raise_for_status()
return response
response = self._run_with_retry(api_call)
data = response.json()
if "candidates" not in data or not data["candidates"]:
raise AIProcessingError("Empty response from Gemini API")
candidate = data["candidates"][0]
if "content" not in candidate or "parts" not in candidate["content"]:
raise AIProcessingError("Invalid response format from Gemini API")
result = candidate["content"]["parts"][0]["text"]
return result.strip()
def _run(self, prompt: str, use_flash: bool = True, timeout: int = 300) -> str:
"""Run Gemini with fallback between CLI and API"""
# Try CLI first if available
if self._cli_path:
try:
return self._run_cli(prompt, use_flash, timeout)
except Exception as e:
self.logger.warning(f"Gemini CLI failed, trying API: {e}")
# Fallback to API
if self._api_key:
api_timeout = min(timeout, 180)
return self._call_api(prompt, use_flash, api_timeout)
raise AIProcessingError("No Gemini provider available (CLI or API)")
def summarize(self, text: str, **kwargs) -> str:
"""Generate summary using Gemini"""
prompt = f"""Summarize the following text:
{text}
Provide a clear, concise summary in Spanish."""
return self._run(prompt, use_flash=True)
def correct_text(self, text: str, **kwargs) -> str:
"""Correct text using Gemini"""
prompt = f"""Correct the following text for grammar, spelling, and clarity:
{text}
Return only the corrected text, nothing else."""
return self._run(prompt, use_flash=True)
def classify_content(self, text: str, **kwargs) -> Dict[str, Any]:
"""Classify content using Gemini"""
categories = [
"historia",
"analisis_contable",
"instituciones_gobierno",
"otras_clases",
]
prompt = f"""Classify the following text into one of these categories:
- historia
- analisis_contable
- instituciones_gobierno
- otras_clases
Text: {text}
Return only the category name, nothing else."""
result = self._run(prompt, use_flash=True).lower()
# Validate result
if result not in categories:
result = "otras_clases"
return {"category": result, "confidence": 0.9, "provider": self.name}
def generate_text(self, prompt: str, **kwargs) -> str:
"""Generate text using Gemini"""
use_flash = kwargs.get("use_flash", True)
if self._api_key:
return self._call_api(prompt, use_flash=use_flash)
return self._run_cli(prompt, use_flash=use_flash)
def fix_latex(self, latex_code: str, error_log: str, **kwargs) -> str:
"""Fix broken LaTeX code using Gemini"""
prompt = f"""Fix the following LaTeX code which failed to compile.
Error Log:
{error_log[-3000:]}
Broken Code:
{latex_code}
INSTRUCTIONS:
1. Return ONLY the corrected LaTeX code. No explanations.
2. Start immediately with \\documentclass.
COMMON LATEX ERRORS TO FIX:
- TikZ nodes with line breaks (\\\\) MUST have "align=center" in their style.
WRONG: \\node[box] (n) {{Text\\\\More}};
CORRECT: \\node[box, align=center] (n) {{Text\\\\More}};
- All \\begin{{env}} must have matching \\end{{env}}
- All braces {{ }} must be balanced
- Math mode $ must be paired
- Special characters need escaping: % & # _
- tcolorbox environments need proper titles: [Title] not {{Title}}
"""
return self._run(prompt, use_flash=False) # Use Pro model for coding fixes
def get_stats(self) -> Dict[str, Any]:
"""Get provider statistics"""
return {
"rate_limiter": {
"tokens": round(self._rate_limiter.tokens, 2),
"capacity": self._rate_limiter.capacity,
"rate": self._rate_limiter.rate,
},
"circuit_breaker": {
"state": self._circuit_breaker.state,
"failures": self._circuit_breaker.failures,
"failure_threshold": self._circuit_breaker.failure_threshold,
},
"cli_available": bool(self._cli_path),
"api_available": bool(self._api_key),
}
# Global instance is created in __init__.py

346
services/ai/parallel_provider.py Normal file
View File

@@ -0,0 +1,346 @@
"""
Parallel AI Provider - Race multiple providers for fastest response
Implements Strategy A: Parallel Generation with Consensus
"""
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from typing import Dict, List, Optional, Any
from datetime import datetime
from core import AIProcessingError
from .base_provider import AIProvider
@dataclass
class ProviderResult:
"""Result from a single provider"""
provider_name: str
content: str
duration_ms: int
success: bool
error: Optional[str] = None
quality_score: float = 0.0
@dataclass
class ParallelResult:
"""Aggregated result from parallel execution"""
content: str
strategy: str
providers_used: List[str]
total_duration_ms: int
all_results: List[ProviderResult]
selected_provider: str
class ParallelAIProvider:
"""
Orchestrates multiple AI providers in parallel for faster responses.
Strategies:
- "race": Use first successful response (fastest)
- "consensus": Wait for all, select best quality
- "majority": Select most common response
"""
def __init__(self, providers: Dict[str, AIProvider], max_workers: int = 4):
self.providers = providers
self.max_workers = max_workers
self.logger = logging.getLogger(__name__)
self.executor = ThreadPoolExecutor(max_workers=max_workers)
def _generate_sync(self, provider: AIProvider, prompt: str, **kwargs) -> ProviderResult:
"""Synchronous wrapper for provider generation"""
start_time = datetime.now()
try:
content = provider.generate_text(prompt, **kwargs)
duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
# Calculate quality score
quality_score = self._calculate_quality_score(content)
return ProviderResult(
provider_name=provider.name,
content=content,
duration_ms=duration_ms,
success=True,
quality_score=quality_score
)
except Exception as e:
duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
self.logger.error(f"{provider.name} failed: {e}")
return ProviderResult(
provider_name=provider.name,
content="",
duration_ms=duration_ms,
success=False,
error=str(e)
)
def _calculate_quality_score(self, content: str) -> float:
"""Calculate quality score for generated content"""
score = 0.0
# Length check (comprehensive is better)
if 500 < len(content) < 50000:
score += 0.2
# LaTeX structure validation
latex_indicators = [
r"\documentclass",
r"\begin{document}",
r"\section",
r"\subsection",
r"\begin{itemize}",
r"\end{document}"
]
found_indicators = sum(1 for ind in latex_indicators if ind in content)
score += (found_indicators / len(latex_indicators)) * 0.4
# Bracket matching
if content.count("{") == content.count("}"):
score += 0.2
# Environment closure
envs = ["document", "itemize", "enumerate"]
for env in envs:
if content.count(f"\\begin{{{env}}}") == content.count(f"\\end{{{env}}}"):
score += 0.1
# Has content beyond template
if len(content) > 1000:
score += 0.1
return min(score, 1.0)
def generate_parallel(
self,
prompt: str,
strategy: str = "race",
timeout_ms: int = 300000, # 5 minutes default
**kwargs
) -> ParallelResult:
"""
Execute prompt across multiple providers in parallel.
Args:
prompt: The prompt to send to all providers
strategy: "race", "consensus", or "majority"
timeout_ms: Maximum time to wait for results
**kwargs: Additional arguments for providers
Returns:
ParallelResult with selected content and metadata
"""
if not self.providers:
raise AIProcessingError("No providers available for parallel execution")
start_time = datetime.now()
all_results: List[ProviderResult] = []
# Submit all providers
futures = {}
for name, provider in self.providers.items():
if provider.is_available():
future = self.executor.submit(
self._generate_sync,
provider,
prompt,
**kwargs
)
futures[future] = name
# Wait for results based on strategy
if strategy == "race":
all_results = self._race_strategy(futures, timeout_ms)
elif strategy == "consensus":
all_results = self._consensus_strategy(futures, timeout_ms)
elif strategy == "majority":
all_results = self._majority_strategy(futures, timeout_ms)
else:
raise ValueError(f"Unknown strategy: {strategy}")
# Select best result
selected = self._select_result(all_results, strategy)
total_duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
self.logger.info(
f"Parallel generation complete: {strategy} strategy, "
f"{len(all_results)} providers, {selected.provider_name} selected, "
f"{total_duration_ms}ms"
)
return ParallelResult(
content=selected.content,
strategy=strategy,
providers_used=[r.provider_name for r in all_results if r.success],
total_duration_ms=total_duration_ms,
all_results=all_results,
selected_provider=selected.provider_name
)
def _race_strategy(
self,
futures: dict,
timeout_ms: int
) -> List[ProviderResult]:
"""Return first successful response"""
results = []
for future in as_completed(futures, timeout=timeout_ms / 1000):
try:
result = future.result()
results.append(result)
if result.success:
# Got a successful response, cancel remaining
for f in futures:
f.cancel()
break
except Exception as e:
self.logger.error(f"Future failed: {e}")
return results
def _consensus_strategy(
self,
futures: dict,
timeout_ms: int
) -> List[ProviderResult]:
"""Wait for all, return all results"""
results = []
for future in as_completed(futures, timeout=timeout_ms / 1000):
try:
result = future.result()
results.append(result)
except Exception as e:
self.logger.error(f"Future failed: {e}")
return results
def _majority_strategy(
self,
futures: dict,
timeout_ms: int
) -> List[ProviderResult]:
"""Wait for majority, select most common response"""
results = []
for future in as_completed(futures, timeout=timeout_ms / 1000):
try:
result = future.result()
results.append(result)
except Exception as e:
self.logger.error(f"Future failed: {e}")
return results
def _select_result(self, results: List[ProviderResult], strategy: str) -> ProviderResult:
"""Select best result based on strategy"""
successful = [r for r in results if r.success]
if not successful:
# Return first failed result with error info
return results[0] if results else ProviderResult(
provider_name="none",
content="",
duration_ms=0,
success=False,
error="All providers failed"
)
if strategy == "race" or len(successful) == 1:
return successful[0]
if strategy == "consensus":
# Select by quality score
return max(successful, key=lambda r: r.quality_score)
if strategy == "majority":
# Group by similar content (simplified - use longest)
return max(successful, key=lambda r: len(r.content))
return successful[0]
def fix_latex_parallel(
self,
latex_code: str,
error_log: str,
timeout_ms: int = 120000,
**kwargs
) -> ParallelResult:
"""Try to fix LaTeX across multiple providers in parallel"""
# Build fix prompt for each provider
results = []
start_time = datetime.now()
for name, provider in self.providers.items():
if provider.is_available():
try:
start = datetime.now()
fixed = provider.fix_latex(latex_code, error_log, **kwargs)
duration_ms = int((datetime.now() - start).total_seconds() * 1000)
# Score by checking if error patterns are reduced
quality = self._score_latex_fix(fixed, error_log)
results.append(ProviderResult(
provider_name=name,
content=fixed,
duration_ms=duration_ms,
success=True,
quality_score=quality
))
except Exception as e:
self.logger.error(f"{name} fix failed: {e}")
# Select best fix
if results:
selected = max(results, key=lambda r: r.quality_score)
total_duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
return ParallelResult(
content=selected.content,
strategy="consensus",
providers_used=[r.provider_name for r in results],
total_duration_ms=total_duration_ms,
all_results=results,
selected_provider=selected.provider_name
)
raise AIProcessingError("All providers failed to fix LaTeX")
def _score_latex_fix(self, fixed_latex: str, original_error: str) -> float:
"""Score a LaTeX fix attempt"""
score = 0.5 # Base score
# Check if common error patterns are addressed
error_patterns = [
("Undefined control sequence", r"\\[a-zA-Z]+"),
("Missing $ inserted", r"\$.*\$"),
("Runaway argument", r"\{.*\}"),
]
for error_msg, pattern in error_patterns:
if error_msg in original_error:
# Credit the attempt for each known error type present in the original log
score += 0.1
# Validate bracket matching
if fixed_latex.count("{") == fixed_latex.count("}"):
score += 0.2
# Validate environment closure
envs = ["document", "itemize", "enumerate"]
for env in envs:
begin_count = fixed_latex.count(f"\\begin{{{env}}}")
end_count = fixed_latex.count(f"\\end{{{env}}}")
if begin_count == end_count:
score += 0.1
return min(score, 1.0)
def shutdown(self):
"""Shutdown the executor"""
self.executor.shutdown(wait=True)
def __del__(self):
self.shutdown()
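# Hypothetical usage sketch (provider wiring is illustrative; the factory builds this in practice):
#   parallel = ParallelAIProvider({"claude": ClaudeProvider(), "gemini": GeminiProvider()})
#   result = parallel.generate_parallel(prompt, strategy="race")
#   print(result.selected_provider, result.total_duration_ms, "ms")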

343
services/ai/prompt_manager.py Normal file
View File

@@ -0,0 +1,343 @@
"""
Prompt Manager - Centralized prompt management using resumen.md as source of truth
"""
import re
import os
from pathlib import Path
from typing import Optional, Dict, Any
from config import settings
class PromptManager:
"""
Manages prompts for AI services, loading templates from latex/resumen.md
This is the SINGLE SOURCE OF TRUTH for academic summary generation.
"""
_instance = None
_prompt_cache: Optional[str] = None
_latex_preamble_cache: Optional[str] = None
# Path to the prompt template file
PROMPT_FILE_PATH = Path("latex/resumen.md")
def __new__(cls):
if cls._instance is None:
cls._instance = super(PromptManager, cls).__new__(cls)
return cls._instance
def _load_prompt_template(self) -> str:
"""Load the complete prompt template from resumen.md"""
if self._prompt_cache:
return self._prompt_cache
try:
file_path = self.PROMPT_FILE_PATH.resolve()
if not file_path.exists():
self._prompt_cache = self._get_fallback_prompt()
return self._prompt_cache
content = file_path.read_text(encoding="utf-8")
# The file has a markdown code block after "## Prompt Template"
# We need to find the content from "## Prompt Template" to the LAST ```
# (because there's a ```latex...``` block INSIDE the template)
# First, find where "## Prompt Template" starts
template_start = content.find("## Prompt Template")
if template_start == -1:
self._prompt_cache = self._get_fallback_prompt()
return self._prompt_cache
# Find the opening ``` after the header
after_header = content[template_start:]
code_block_start = after_header.find("```")
if code_block_start == -1:
self._prompt_cache = self._get_fallback_prompt()
return self._prompt_cache
# Skip the opening ``` and any language specifier
after_code_start = after_header[code_block_start + 3:]
first_newline = after_code_start.find("\n")
if first_newline != -1:
actual_content_start = template_start + code_block_start + 3 + first_newline + 1
else:
actual_content_start = template_start + code_block_start + 3
# Now find the LAST ``` that closes the main block
# We look for ``` followed by optional space and then newline or end
remaining = content[actual_content_start:]
# Find all positions of ``` in the remaining content
positions = []
pos = 0
while True:
found = remaining.find("```", pos)
if found == -1:
break
positions.append(found)
pos = found + 3
if not positions:
self._prompt_cache = self._get_fallback_prompt()
return self._prompt_cache
# The LAST ``` is the closing of the main block
# (all previous ``` are the latex block inside the template)
last_backtick_pos = positions[-1]
# Extract the content
template_content = content[actual_content_start:actual_content_start + last_backtick_pos]
# Remove leading newline if present
template_content = template_content.lstrip("\n")
self._prompt_cache = template_content
return self._prompt_cache
except Exception as e:
print(f"Error loading prompt file: {e}")
self._prompt_cache = self._get_fallback_prompt()
return self._prompt_cache
def _get_fallback_prompt(self) -> str:
"""Fallback prompt if resumen.md is not found"""
return """Sos un asistente académico experto. Creá un resumen extenso en LaTeX basado en la transcripción de clase.
## Transcripción de clase:
[PEGAR TRANSCRIPCIÓN AQUÍ]
## Material bibliográfico:
[PEGAR TEXTO DEL LIBRO/APUNTE O INDICAR QUE LO SUBISTE COMO ARCHIVO]
Generá un archivo LaTeX completo con:
- Estructura académica formal
- Mínimo 10 páginas de contenido
- Fórmulas matemáticas en LaTeX
- Tablas y diagramas cuando corresponda
"""
def _load_latex_preamble(self) -> str:
"""Extract the LaTeX preamble from resumen.md"""
if self._latex_preamble_cache:
return self._latex_preamble_cache
try:
file_path = self.PROMPT_FILE_PATH.resolve()
if not file_path.exists():
return self._get_default_preamble()
content = file_path.read_text(encoding="utf-8")
# Extract LaTeX code block in the template
match = re.search(
r"```latex\s*\n([\s\S]*?)\n```",
content
)
if match:
self._latex_preamble_cache = match.group(1).strip()
else:
self._latex_preamble_cache = self._get_default_preamble()
return self._latex_preamble_cache
except Exception as e:
print(f"Error loading LaTeX preamble: {e}")
return self._get_default_preamble()
def _get_default_preamble(self) -> str:
"""Default LaTeX preamble"""
return r"""\documentclass[11pt,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[spanish,provide=*]{babel}
\usepackage{amsmath,amssymb}
\usepackage{geometry}
\usepackage{graphicx}
\usepackage{tikz}
\usetikzlibrary{arrows.meta,positioning,shapes.geometric,calc}
\usepackage{booktabs}
\usepackage{enumitem}
\usepackage{fancyhdr}
\usepackage{titlesec}
\usepackage{tcolorbox}
\usepackage{array}
\usepackage{multirow}
\geometry{margin=2.5cm}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[L]{[MATERIA] - CBC}
\fancyhead[R]{Clase [N]}
\fancyfoot[C]{\thepage}
% Cajas para destacar contenido
\newtcolorbox{definicion}[1][]{
colback=blue!5!white,
colframe=blue!75!black,
fonttitle=\bfseries,
title=#1
}
\newtcolorbox{importante}[1][]{
colback=red!5!white,
colframe=red!75!black,
fonttitle=\bfseries,
title=#1
}
\newtcolorbox{ejemplo}[1][]{
colback=green!5!white,
colframe=green!50!black,
fonttitle=\bfseries,
title=#1
}
"""
def get_latex_summary_prompt(
self,
transcription: str,
materia: str = "Economía",
bibliographic_text: Optional[str] = None,
class_number: Optional[int] = None
) -> str:
"""
Generate the complete prompt for LaTeX academic summary based on resumen.md template.
Args:
transcription: The class transcription text
materia: Subject name (default: "Economía")
bibliographic_text: Optional supporting text from books/notes
class_number: Optional class number for header
Returns:
Complete prompt string ready to send to AI
"""
template = self._load_prompt_template()
# CRITICAL: Prepend explicit instructions to force direct LaTeX generation
# (This doesn't modify resumen.md, just adds context before it)
explicit_instructions = """CRITICAL: Tu respuesta debe ser ÚNICAMENTE código LaTeX.
INSTRUCCIONES OBLIGATORIAS:
1. NO incluyas explicaciones previas
2. NO describas lo que vas a hacer
3. Comienza INMEDIATAMENTE con \\documentclass
4. Tu respuesta debe ser SOLO el código LaTeX fuente
5. Termina con \\end{document}
---
"""
prompt = explicit_instructions + template
# Replace placeholders
prompt = prompt.replace("[MATERIA]", materia)
# Insert transcription
if "[PEGAR TRANSCRIPCIÓN AQUÍ]" in prompt:
prompt = prompt.replace("[PEGAR TRANSCRIPCIÓN AQUÍ]", transcription)
else:
prompt += f"\n\n## Transcripción de clase:\n{transcription}"
# Insert bibliographic material
bib_text = bibliographic_text or "No se proporcionó material bibliográfico adicional."
if "[PEGAR TEXTO DEL LIBRO/APUNTE O INDICAR QUE LO SUBISTE COMO ARCHIVO]" in prompt:
prompt = prompt.replace(
"[PEGAR TEXTO DEL LIBRO/APUNTE O INDICAR QUE LO SUBISTE COMO ARCHIVO]",
bib_text
)
else:
prompt += f"\n\n## Material bibliográfico:\n{bib_text}"
# Add class number if provided
if class_number is not None:
prompt = prompt.replace("[N]", str(class_number))
return prompt
def get_latex_preamble(
self,
materia: str = "Economía",
class_number: Optional[int] = None
) -> str:
"""
Get the LaTeX preamble with placeholders replaced.
Args:
materia: Subject name
class_number: Optional class number
Returns:
Complete LaTeX preamble as string
"""
preamble = self._load_latex_preamble()
# Replace placeholders
preamble = preamble.replace("[MATERIA]", materia)
if class_number is not None:
preamble = preamble.replace("[N]", str(class_number))
return preamble
def get_latex_fix_prompt(self, latex_code: str, error_log: str) -> str:
"""Get prompt for fixing broken LaTeX code"""
return f"""I have a LaTeX file that failed to compile. Please fix the code.
COMPILER ERROR LOG:
{error_log[-3000:]}
BROKEN LATEX CODE:
{latex_code}
INSTRUCTIONS:
1. Analyze the error log to find the specific syntax error.
2. Fix the LaTeX code.
3. Return ONLY the full corrected LaTeX code.
4. Do not include markdown blocks or explanations.
5. Start immediately with \\documentclass.
6. Ensure all braces {{}} are properly balanced.
7. Ensure all environments \\begin{{...}} have matching \\end{{...}}.
8. Ensure all packages are properly declared.
"""
def extract_latex_from_response(self, response: str) -> Optional[str]:
"""
Extract clean LaTeX code from AI response.
Handles cases where AI wraps LaTeX in ```latex...``` blocks.
"""
if not response:
return None
# Try to find content inside ```latex ... ``` blocks
code_block_pattern = r"```(?:latex|tex)?\s*([\s\S]*?)\s*```"
match = re.search(code_block_pattern, response, re.IGNORECASE)
if match:
latex = match.group(1).strip()
else:
latex = response.strip()
# Verify it looks like LaTeX
if "\\documentclass" not in latex:
return None
# Clean up: remove anything before \documentclass
start_idx = latex.find("\\documentclass")
latex = latex[start_idx:]
# Clean up: remove anything after \end{document}
if "\\end{document}" in latex:
end_idx = latex.rfind("\\end{document}")
latex = latex[:end_idx + len("\\end{document}")]
return latex.strip()
# Singleton instance for easy import
prompt_manager = PromptManager()
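# Illustrative sketch (assumed usage, not part of the pipeline): build a prompt
# and clean a wrapped AI response. The transcription below is a placeholder.
if __name__ == "__main__":
    demo_prompt = prompt_manager.get_latex_summary_prompt(
        transcription="La clase cubrió oferta y demanda...",
        materia="Economía",
        class_number=3,
    )
    print(demo_prompt[:300])  # preview of the assembled prompt
    fake_response = "```latex\n\\documentclass{article}\\begin{document}Hola\\end{document}\n```"
    print(prompt_manager.extract_latex_from_response(fake_response))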

80
services/ai/provider_factory.py Normal file
View File

@@ -0,0 +1,80 @@
"""
AI Provider Factory (Factory Pattern)
"""
import logging
from typing import Dict, Type, Optional
from core import AIProcessingError
from .base_provider import AIProvider
from .claude_provider import ClaudeProvider
from .gemini_provider import GeminiProvider
from .parallel_provider import ParallelAIProvider
class AIProviderFactory:
"""Factory for creating AI providers with fallback and parallel execution"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._providers: Dict[str, AIProvider] = {
"claude": ClaudeProvider(),
"gemini": GeminiProvider(),
}
self._parallel_provider: Optional[ParallelAIProvider] = None
def get_provider(self, preferred: str = "gemini") -> AIProvider:
"""Get available provider with fallback"""
# Try preferred provider first
if preferred in self._providers:
provider = self._providers[preferred]
if provider.is_available():
self.logger.info(f"Using {preferred} provider")
return provider
# Fallback to any available provider
for name, provider in self._providers.items():
if provider.is_available():
self.logger.info(f"Falling back to {name} provider")
return provider
raise AIProcessingError("No AI providers available")
def get_all_available(self) -> Dict[str, AIProvider]:
"""Get all available providers"""
return {
name: provider
for name, provider in self._providers.items()
if provider.is_available()
}
def get_best_provider(self) -> AIProvider:
"""Get the best available provider (Claude > Gemini)"""
return self.get_provider("claude")
def get_parallel_provider(self, max_workers: int = 4) -> ParallelAIProvider:
"""Get parallel provider for racing multiple AI providers"""
available = self.get_all_available()
if not available:
raise AIProcessingError("No providers available for parallel execution")
if self._parallel_provider is None:
self._parallel_provider = ParallelAIProvider(
providers=available,
max_workers=max_workers
)
self.logger.info(
f"Created parallel provider with {len(available)} workers: "
f"{', '.join(available.keys())}"
)
return self._parallel_provider
def use_parallel(self) -> bool:
"""Check if parallel execution should be used (multiple providers available)"""
return len(self.get_all_available()) > 1
# Global instance
ai_provider_factory = AIProviderFactory()
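# Illustrative sketch: provider resolution depends on which API keys are set
# in the environment; with none configured this raises AIProcessingError.
if __name__ == "__main__":
    try:
        provider = ai_provider_factory.get_provider("gemini")
        print(f"Resolved provider: {type(provider).__name__}")
        print(f"Parallel execution possible: {ai_provider_factory.use_parallel()}")
    except AIProcessingError:
        print("No AI providers are configured")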

256
services/ai_service.py Normal file
View File

@@ -0,0 +1,256 @@
"""
AI Service - Unified interface for AI providers with caching
"""
import logging
import hashlib
import time
from typing import Optional, Dict, Any
from threading import Lock
from config import settings
from core import AIProcessingError
from .ai.provider_factory import AIProviderFactory, ai_provider_factory
class LRUCache:
"""Thread-safe LRU Cache implementation"""
def __init__(self, max_size: int = 100, ttl: int = 3600):
self.max_size = max_size
self.ttl = ttl
self._cache: Dict[str, tuple[str, float]] = {}
self._order: list[str] = []
self._lock = Lock()
def _is_expired(self, timestamp: float) -> bool:
return (time.time() - timestamp) > self.ttl
def get(self, key: str) -> Optional[str]:
with self._lock:
if key not in self._cache:
return None
value, timestamp = self._cache[key]
if self._is_expired(timestamp):
del self._cache[key]
self._order.remove(key)
return None
# Move to end (most recently used)
self._order.remove(key)
self._order.append(key)
return value
def set(self, key: str, value: str) -> None:
with self._lock:
if key in self._cache:
self._order.remove(key)
elif len(self._order) >= self.max_size:
# Remove least recently used
oldest = self._order.pop(0)
del self._cache[oldest]
self._cache[key] = (value, time.time())
self._order.append(key)
def stats(self) -> Dict[str, int]:
with self._lock:
return {
"size": len(self._cache),
"max_size": self.max_size,
"hits": sum(1 for _, t in self._cache.values() if not self._is_expired(t))
}
class RateLimiter:
"""Token bucket rate limiter"""
def __init__(self, rate: float = 10, capacity: int = 20):
self.rate = rate # tokens per second
self.capacity = capacity
self.tokens = capacity
self.last_update = time.time()
self._lock = Lock()
def acquire(self, tokens: int = 1) -> float:
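"""Reserve tokens from the bucket. Returns 0.0 if they are available now,
otherwise the seconds the caller should sleep before proceeding
(the tokens are already debited either way)."""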
with self._lock:
now = time.time()
elapsed = now - self.last_update
self.last_update = now
self.tokens = min(self.capacity, self.tokens + elapsed * self.rate)
if self.tokens >= tokens:
self.tokens -= tokens
return 0.0
wait_time = (tokens - self.tokens) / self.rate
self.tokens = 0
return wait_time
class AIService:
"""Unified service for AI operations with caching and rate limiting"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._factory: Optional[AIProviderFactory] = None
self._prompt_cache = LRUCache(max_size=100, ttl=3600) # 1 hour TTL
self._rate_limiter = RateLimiter(rate=15, capacity=30)
self._stats = {
"total_requests": 0,
"cache_hits": 0,
"api_calls": 0
}
@property
def factory(self) -> AIProviderFactory:
"""Lazy initialization of provider factory"""
if self._factory is None:
self._factory = ai_provider_factory
return self._factory
def _get_cache_key(self, prompt: str, operation: str) -> str:
"""Generate cache key from prompt and operation"""
content = f"{operation}:{prompt[:500]}" # Limit prompt length
return hashlib.sha256(content.encode()).hexdigest()
def generate_text(
self,
prompt: str,
provider: Optional[str] = None,
max_tokens: int = 4096
) -> str:
"""Generate text using AI provider with caching"""
self._stats["total_requests"] += 1
cache_key = self._get_cache_key(prompt, f"generate:{provider or 'default'}")
# Check cache
cached_result = self._prompt_cache.get(cache_key)
if cached_result:
self._stats["cache_hits"] += 1
self.logger.debug(f"Cache hit for generate_text ({len(cached_result)} chars)")
return cached_result
# Apply rate limiting
wait_time = self._rate_limiter.acquire()
if wait_time > 0:
time.sleep(wait_time)
try:
self._stats["api_calls"] += 1
ai_provider = self.factory.get_provider(provider or 'gemini')
result = ai_provider.generate(prompt, max_tokens=max_tokens)
# Cache result
self._prompt_cache.set(cache_key, result)
return result
except AIProcessingError as e:
self.logger.error(f"AI generation failed: {e}")
return f"Error: {str(e)}"
def summarize(self, text: str, **kwargs) -> str:
"""Generate summary of text with caching"""
self._stats["total_requests"] += 1
cache_key = self._get_cache_key(text, "summarize")
cached_result = self._prompt_cache.get(cache_key)
if cached_result:
self._stats["cache_hits"] += 1
self.logger.debug(f"Cache hit for summarize ({len(cached_result)} chars)")
return cached_result
wait_time = self._rate_limiter.acquire()
if wait_time > 0:
time.sleep(wait_time)
try:
self._stats["api_calls"] += 1
provider = self.factory.get_best_provider()
result = provider.summarize(text, **kwargs)
self._prompt_cache.set(cache_key, result)
return result
except AIProcessingError as e:
self.logger.error(f"Summarization failed: {e}")
return f"Error: {str(e)}"
def correct_text(self, text: str, **kwargs) -> str:
"""Correct grammar and spelling with caching"""
self._stats["total_requests"] += 1
cache_key = self._get_cache_key(text, "correct")
cached_result = self._prompt_cache.get(cache_key)
if cached_result:
self._stats["cache_hits"] += 1
return cached_result
wait_time = self._rate_limiter.acquire()
if wait_time > 0:
time.sleep(wait_time)
try:
self._stats["api_calls"] += 1
provider = self.factory.get_best_provider()
result = provider.correct_text(text, **kwargs)
self._prompt_cache.set(cache_key, result)
return result
except AIProcessingError as e:
self.logger.error(f"Text correction failed: {e}")
return text
def classify_content(self, text: str, **kwargs) -> Dict[str, Any]:
"""Classify content into categories with caching"""
self._stats["total_requests"] += 1
# For classification, use a shorter text for cache key
short_text = text[:200]
cache_key = self._get_cache_key(short_text, "classify")
cached_result = self._prompt_cache.get(cache_key)
if cached_result:
self._stats["cache_hits"] += 1
import json
return json.loads(cached_result)
wait_time = self._rate_limiter.acquire()
if wait_time > 0:
time.sleep(wait_time)
try:
self._stats["api_calls"] += 1
provider = self.factory.get_best_provider()
result = provider.classify_content(text, **kwargs)
import json
self._prompt_cache.set(cache_key, json.dumps(result))
return result
except AIProcessingError as e:
self.logger.error(f"Classification failed: {e}")
return {"category": "otras_clases", "confidence": 0.0}
def get_stats(self) -> Dict[str, Any]:
"""Get service statistics"""
cache_stats = self._prompt_cache.stats()
hit_rate = (self._stats["cache_hits"] / self._stats["total_requests"] * 100) if self._stats["total_requests"] > 0 else 0
return {
**self._stats,
"cache_size": cache_stats["size"],
"cache_max_size": cache_stats["max_size"],
"cache_hit_rate": round(hit_rate, 2),
"rate_limiter": {
"tokens": self._rate_limiter.tokens,
"capacity": self._rate_limiter.capacity
}
}
def clear_cache(self) -> None:
"""Clear the prompt cache"""
self._prompt_cache = LRUCache(max_size=100, ttl=3600)
self.logger.info("AI service cache cleared")
# Global instance
ai_service = AIService()
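# Illustrative sketch: a repeated identical prompt should be answered from the
# LRU cache on the second call (assuming at least one provider is configured).
if __name__ == "__main__":
    print(ai_service.generate_text("Explain supply and demand in one sentence."))
    print(ai_service.generate_text("Explain supply and demand in one sentence."))
    print(ai_service.get_stats())  # expect cache_hits >= 1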

247
services/gpu_detector.py Normal file
View File

@@ -0,0 +1,247 @@
"""
GPU Detection and Management Service
Provides unified interface for detecting and using NVIDIA (CUDA), AMD (ROCm), or CPU.
Fallback order: NVIDIA -> AMD -> CPU
"""
import logging
import os
import subprocess
import shutil
from enum import Enum
from typing import Dict, Any, Optional
logger = logging.getLogger(__name__)
# Try to import torch
try:
import torch
TORCH_AVAILABLE = True
except ImportError:
TORCH_AVAILABLE = False
class GPUType(Enum):
"""Supported GPU types"""
NVIDIA = "nvidia"
AMD = "amd"
CPU = "cpu"
class GPUDetector:
"""
Service for detecting and managing GPU resources.
Detects GPU type with fallback order: NVIDIA -> AMD -> CPU
Provides unified interface regardless of GPU vendor.
"""
def __init__(self):
self._gpu_type: Optional[GPUType] = None
self._device: Optional[str] = None
self._initialized: bool = False
def initialize(self) -> None:
"""Initialize GPU detection"""
if self._initialized:
return
self._gpu_type = self._detect_gpu_type()
self._device = self._get_device_string()
self._setup_environment()
self._initialized = True
logger.info(f"GPU Detector initialized: {self._gpu_type.value} -> {self._device}")
def _detect_gpu_type(self) -> GPUType:
"""
Detect available GPU type.
Order: NVIDIA -> AMD -> CPU
"""
# Check user preference first
preference = os.getenv("GPU_PREFERENCE", "auto").lower()
if preference == "cpu":
logger.info("GPU preference set to CPU, skipping GPU detection")
return GPUType.CPU
if not TORCH_AVAILABLE:
logger.warning("PyTorch not available, using CPU")
return GPUType.CPU
# Check NVIDIA first
if preference in ("auto", "nvidia"):
if self._check_nvidia():
logger.info("NVIDIA GPU detected via nvidia-smi")
return GPUType.NVIDIA
# Check AMD second
if preference in ("auto", "amd"):
if self._check_amd():
logger.info("AMD GPU detected via ROCm")
return GPUType.AMD
# Fallback to checking torch.cuda (works for both NVIDIA and ROCm)
if torch.cuda.is_available():
device_name = torch.cuda.get_device_name(0).lower()
if "nvidia" in device_name or "geforce" in device_name or "rtx" in device_name or "gtx" in device_name:
return GPUType.NVIDIA
elif "amd" in device_name or "radeon" in device_name or "rx" in device_name:
return GPUType.AMD
else:
# Unknown GPU vendor but CUDA works
logger.warning(f"Unknown GPU vendor: {device_name}, treating as NVIDIA-compatible")
return GPUType.NVIDIA
logger.info("No GPU detected, using CPU")
return GPUType.CPU
def _check_nvidia(self) -> bool:
"""Check if NVIDIA GPU is available using nvidia-smi"""
nvidia_smi = shutil.which("nvidia-smi")
if not nvidia_smi:
return False
try:
result = subprocess.run(
[nvidia_smi, "--query-gpu=name", "--format=csv,noheader"],
capture_output=True,
text=True,
timeout=5
)
return result.returncode == 0 and bool(result.stdout.strip())
except Exception as e:
logger.debug(f"nvidia-smi check failed: {e}")
return False
def _check_amd(self) -> bool:
"""Check if AMD GPU is available using rocm-smi"""
rocm_smi = shutil.which("rocm-smi")
if not rocm_smi:
return False
try:
result = subprocess.run(
[rocm_smi, "--showproductname"],
capture_output=True,
text=True,
timeout=5
)
return result.returncode == 0 and "GPU" in result.stdout
except Exception as e:
logger.debug(f"rocm-smi check failed: {e}")
return False
def _setup_environment(self) -> None:
"""Set up environment variables for detected GPU"""
if self._gpu_type == GPUType.AMD:
# Set HSA override for AMD RX 6000 series (gfx1030)
hsa_version = os.getenv("HSA_OVERRIDE_GFX_VERSION", "10.3.0")
os.environ.setdefault("HSA_OVERRIDE_GFX_VERSION", hsa_version)
logger.info(f"Set HSA_OVERRIDE_GFX_VERSION={hsa_version}")
def _get_device_string(self) -> str:
"""Get PyTorch device string"""
if self._gpu_type in (GPUType.NVIDIA, GPUType.AMD):
return "cuda"
return "cpu"
@property
def gpu_type(self) -> GPUType:
"""Get detected GPU type"""
if not self._initialized:
self.initialize()
return self._gpu_type
@property
def device(self) -> str:
"""Get device string for PyTorch"""
if not self._initialized:
self.initialize()
return self._device
def get_device(self) -> "torch.device":
"""Get PyTorch device object"""
if not TORCH_AVAILABLE:
raise RuntimeError("PyTorch not available")
if not self._initialized:
self.initialize()
return torch.device(self._device)
def is_available(self) -> bool:
"""Check if GPU is available"""
if not self._initialized:
self.initialize()
return self._gpu_type in (GPUType.NVIDIA, GPUType.AMD)
def is_nvidia(self) -> bool:
"""Check if NVIDIA GPU is being used"""
if not self._initialized:
self.initialize()
return self._gpu_type == GPUType.NVIDIA
def is_amd(self) -> bool:
"""Check if AMD GPU is being used"""
if not self._initialized:
self.initialize()
return self._gpu_type == GPUType.AMD
def is_cpu(self) -> bool:
"""Check if CPU is being used"""
if not self._initialized:
self.initialize()
return self._gpu_type == GPUType.CPU
def get_device_name(self) -> str:
"""Get GPU device name"""
if not self._initialized:
self.initialize()
if self._gpu_type == GPUType.CPU:
return "CPU"
if TORCH_AVAILABLE and torch.cuda.is_available():
return torch.cuda.get_device_name(0)
return "Unknown"
def get_memory_info(self) -> Dict[str, Any]:
"""Get GPU memory information"""
if not self._initialized:
self.initialize()
if self._gpu_type == GPUType.CPU:
return {"type": "cpu", "error": "No GPU available"}
if not TORCH_AVAILABLE or not torch.cuda.is_available():
return {"type": self._gpu_type.value, "error": "CUDA not available"}
try:
props = torch.cuda.get_device_properties(0)
total = props.total_memory / 1024**3
allocated = torch.cuda.memory_allocated(0) / 1024**3
reserved = torch.cuda.memory_reserved(0) / 1024**3
return {
"type": self._gpu_type.value,
"device_name": props.name,
"total_gb": round(total, 2),
"allocated_gb": round(allocated, 2),
"reserved_gb": round(reserved, 2),
"free_gb": round(total - allocated, 2),
"usage_percent": round((allocated / total) * 100, 1)
}
except Exception as e:
return {"type": self._gpu_type.value, "error": str(e)}
def empty_cache(self) -> None:
"""Clear GPU memory cache"""
if not self._initialized:
self.initialize()
if TORCH_AVAILABLE and torch.cuda.is_available():
torch.cuda.empty_cache()
logger.debug("GPU cache cleared")
# Global singleton instance
gpu_detector = GPUDetector()
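# Illustrative sketch: the detector initializes lazily on first access and
# falls back NVIDIA -> AMD -> CPU.
if __name__ == "__main__":
    print(f"GPU type: {gpu_detector.gpu_type.value}")
    print(f"Torch device string: {gpu_detector.device}")
    print(gpu_detector.get_memory_info())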

View File

@@ -0,0 +1,137 @@
"""
Performance metrics collector for CBCFacil
"""
import time
import threading
import psutil
import logging
from typing import Dict, Any, Optional
from datetime import datetime, timedelta
from contextlib import contextmanager
class MetricsCollector:
"""Collect and aggregate performance metrics"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._start_time = time.time()
self._request_count = 0
self._error_count = 0
self._total_latency = 0.0
self._latencies = []
self._lock = threading.Lock()
self._process = psutil.Process()
def record_request(self, latency: float, success: bool = True) -> None:
"""Record a request with latency"""
with self._lock:
self._request_count += 1
self._total_latency += latency
self._latencies.append(latency)
# Keep only last 1000 latencies for memory efficiency
if len(self._latencies) > 1000:
self._latencies = self._latencies[-1000:]
if not success:
self._error_count += 1
def get_latency_percentiles(self) -> Dict[str, float]:
"""Calculate latency percentiles"""
with self._lock:
if not self._latencies:
return {"p50": 0, "p95": 0, "p99": 0}
sorted_latencies = sorted(self._latencies)
n = len(sorted_latencies)
return {
"p50": sorted_latencies[int(n * 0.50)],
"p95": sorted_latencies[int(n * 0.95)],
"p99": sorted_latencies[int(n * 0.99)]
}
def get_system_metrics(self) -> Dict[str, Any]:
"""Get system resource metrics"""
try:
memory = self._process.memory_info()
cpu_percent = self._process.cpu_percent(interval=0.1)
return {
"cpu_percent": cpu_percent,
"memory_rss_mb": memory.rss / 1024 / 1024,
"memory_vms_mb": memory.vms / 1024 / 1024,
"thread_count": self._process.num_threads(),
"open_files": self._process.open_files(),
}
except Exception as e:
self.logger.warning(f"Error getting system metrics: {e}")
return {}
def get_summary(self) -> Dict[str, Any]:
"""Get metrics summary"""
with self._lock:
uptime = time.time() - self._start_time
latency_pcts = self.get_latency_percentiles()
return {
"uptime_seconds": round(uptime, 2),
"total_requests": self._request_count,
"error_count": self._error_count,
"error_rate": round(self._error_count / max(1, self._request_count) * 100, 2),
"requests_per_second": round(self._request_count / max(1, uptime), 2),
"average_latency_ms": round(self._total_latency / max(1, self._request_count) * 1000, 2),
"latency_p50_ms": round(latency_pcts["p50"] * 1000, 2),
"latency_p95_ms": round(latency_pcts["p95"] * 1000, 2),
"latency_p99_ms": round(latency_pcts["p99"] * 1000, 2),
}
def reset(self) -> None:
"""Reset metrics"""
with self._lock:
self._request_count = 0
self._error_count = 0
self._total_latency = 0.0
self._latencies = []
self._start_time = time.time()
class LatencyTracker:
"""Context manager for tracking operation latency"""
def __init__(self, collector: MetricsCollector, operation: str):
self.collector = collector
self.operation = operation
self.start_time: Optional[float] = None
self.success = True
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
latency = time.time() - self.start_time
success = exc_type is None
self.collector.record_request(latency, success)
return False # Don't suppress exceptions
# Global metrics collector
metrics_collector = MetricsCollector()
@contextmanager
def track_latency(operation: str = "unknown"):
"""Convenience function for latency tracking"""
with LatencyTracker(metrics_collector, operation):
yield
def get_performance_report() -> Dict[str, Any]:
"""Generate comprehensive performance report"""
return {
"metrics": metrics_collector.get_summary(),
"system": metrics_collector.get_system_metrics(),
"timestamp": datetime.utcnow().isoformat()
}
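# Illustrative sketch: time an operation with the context manager, then read
# back the aggregated report.
if __name__ == "__main__":
    with track_latency("demo_sleep"):
        time.sleep(0.05)
    print(get_performance_report())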

353
services/notion_service.py Normal file
View File

@@ -0,0 +1,353 @@
"""
Notion integration service with official SDK
"""
import logging
from typing import Optional, Dict, Any, List
from pathlib import Path
from datetime import datetime
import time
try:
from notion_client import Client
from notion_client.errors import APIResponseError
NOTION_AVAILABLE = True
except ImportError:
NOTION_AVAILABLE = False
Client = None
APIResponseError = Exception
from config import settings
class NotionService:
"""Enhanced Notion API integration service"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._client: Optional[Client] = None
self._database_id: Optional[str] = None
def configure(self, token: str, database_id: str) -> None:
"""Configure Notion with official SDK"""
if not NOTION_AVAILABLE:
self.logger.error(
"notion-client not installed. Install with: pip install notion-client"
)
return
self._client = Client(auth=token)
self._database_id = database_id
self.logger.info("Notion service configured with official SDK")
@property
def is_configured(self) -> bool:
"""Check if Notion is configured"""
return bool(self._client and self._database_id and NOTION_AVAILABLE)
def _rate_limited_request(self, func, *args, **kwargs):
"""Execute request with rate limiting and retry"""
max_retries = 3
base_delay = 1
for attempt in range(max_retries):
try:
return func(*args, **kwargs)
except APIResponseError as e:
if hasattr(e, "code") and e.code == "rate_limited":
delay = base_delay * (2**attempt)
self.logger.warning(f"Rate limited by Notion, waiting {delay}s")
time.sleep(delay)
else:
raise
raise Exception("Max retries exceeded for Notion API")
def create_page_with_summary(
self, title: str, summary: str, metadata: Dict[str, Any]
) -> Optional[str]:
"""Create a new page in Notion (database or parent page) with summary content"""
if not self.is_configured:
self.logger.warning("Notion not configured, skipping upload")
return None
try:
# Determine whether the target is a database or a parent page
use_as_page = metadata.get("use_as_page", False)
if use_as_page:
# Create the page inside another page
page = self._rate_limited_request(
self._client.pages.create,
parent={"page_id": self._database_id},
properties={"title": [{"text": {"content": title[:100]}}]},
)
else:
# Create the page in the database (original method)
properties = {"Name": {"title": [{"text": {"content": title[:100]}}]}}
# Add status if the database supports it
if metadata.get("add_status", True):
properties["Status"] = {"select": {"name": "Procesado"}}
# Add the file type when it is available and add_status is enabled
if metadata.get("add_status", False) and metadata.get("file_type"):
properties["Tipo"] = {
"select": {"name": metadata["file_type"].upper()}
}
page = self._rate_limited_request(
self._client.pages.create,
parent={"database_id": self._database_id},
properties=properties,
)
page_id = page["id"]
self.logger.info(f"✅ Notion page created: {page_id}")
# Add the summary content as blocks
self._add_summary_content(page_id, summary, metadata.get("pdf_path"))
return page_id
except Exception as e:
self.logger.error(f"❌ Error creating Notion page: {e}")
return None
def _add_summary_content(
self, page_id: str, summary: str, pdf_path: Optional[Path] = None
) -> bool:
"""Add summary content as Notion blocks"""
try:
blocks = []
# Add a note about the PDF if it exists
if pdf_path and pdf_path.exists():
blocks.append(
{
"object": "block",
"type": "callout",
"callout": {
"rich_text": [
{
"type": "text",
"text": {
"content": f"📄 Documento generado automáticamente: {pdf_path.name}"
},
}
],
"icon": {"emoji": "📄"},
},
}
)
# Add the summary blocks
summary_blocks = self._parse_markdown_to_blocks(summary)
blocks.extend(summary_blocks)
# Add a footer
blocks.append({"object": "block", "type": "divider", "divider": {}})
blocks.append(
{
"object": "block",
"type": "paragraph",
"paragraph": {
"rich_text": [
{
"type": "text",
"text": {
"content": f"Generado por CBCFacil el {datetime.now().strftime('%d/%m/%Y %H:%M')}"
},
"annotations": {"italic": True, "color": "gray"},
}
]
},
}
)
# The Notion API limits each request to 100 blocks
if blocks:
for i in range(0, len(blocks), 100):
batch = blocks[i : i + 100]
self._rate_limited_request(
self._client.blocks.children.append,
block_id=page_id,
children=batch,
)
self.logger.info(f"✅ Added {len(blocks)} blocks to Notion page")
return True
except Exception as e:
self.logger.error(f"❌ Error adding content blocks: {e}")
return False
def _parse_markdown_to_blocks(self, markdown: str) -> List[Dict]:
"""Convert markdown to Notion blocks"""
blocks = []
lines = markdown.split("\n")
for line in lines:
line = line.strip()
if not line:
continue
# Headings
if line.startswith("# "):
text = line[2:].strip()[:2000]
if text:
blocks.append(
{
"object": "block",
"type": "heading_1",
"heading_1": {
"rich_text": [
{"type": "text", "text": {"content": text}}
]
},
}
)
elif line.startswith("## "):
text = line[3:].strip()[:2000]
if text:
blocks.append(
{
"object": "block",
"type": "heading_2",
"heading_2": {
"rich_text": [
{"type": "text", "text": {"content": text}}
]
},
}
)
elif line.startswith("### "):
text = line[4:].strip()[:2000]
if text:
blocks.append(
{
"object": "block",
"type": "heading_3",
"heading_3": {
"rich_text": [
{"type": "text", "text": {"content": text}}
]
},
}
)
# Bullet points
elif line.startswith("- ") or line.startswith("* "):
text = line[2:].strip()[:2000]
if text:
blocks.append(
{
"object": "block",
"type": "bulleted_list_item",
"bulleted_list_item": {
"rich_text": [
{"type": "text", "text": {"content": text}}
]
},
}
)
# Divider
elif line.strip() == "---":
blocks.append({"object": "block", "type": "divider", "divider": {}})
# Paragraph (skip footer lines)
elif not line.startswith("*Generado por"):
text = line[:2000]
if text:
blocks.append(
{
"object": "block",
"type": "paragraph",
"paragraph": {
"rich_text": [
{"type": "text", "text": {"content": text}}
]
},
}
)
return blocks
def upload_pdf_legacy(self, pdf_path: Path, title: str) -> bool:
"""Legacy method - creates simple page (backward compatibility)"""
if not self.is_configured:
self.logger.warning("Notion not configured, skipping upload")
return False
try:
# Create a simple page
page_id = self.create_page_with_summary(
title=title,
summary=f"Documento procesado: {title}",
metadata={"file_type": "PDF", "pdf_path": pdf_path},
)
return bool(page_id)
except Exception as e:
self.logger.error(f"Error uploading PDF to Notion: {e}")
return False
# Alias for backward compatibility
def upload_pdf(self, pdf_path: Path, title: str) -> bool:
"""Upload PDF info to Notion (alias for backward compatibility)"""
return self.upload_pdf_legacy(pdf_path, title)
def upload_pdf_as_file(self, pdf_path: Path, title: str) -> bool:
"""Upload PDF info as file (alias for backward compatibility)"""
return self.upload_pdf_legacy(pdf_path, title)
# Global instance
notion_service = NotionService()
def upload_to_notion(pdf_path: Path, title: str) -> bool:
"""Legacy function for backward compatibility"""
return notion_service.upload_pdf(pdf_path, title)
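# Illustrative sketch (placeholder credentials; assumes the target database has
# "Name" title and "Status" select properties):
if __name__ == "__main__":
    notion_service.configure(token="ntn_...", database_id="...")
    page_id = notion_service.create_page_with_summary(
        title="Clase 3 - Economía",
        summary="# Resumen\n- Oferta y demanda\n---",
        metadata={"file_type": "pdf"},
    )
    print(f"Created page: {page_id}")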

View File

@@ -0,0 +1,203 @@
"""
Notion integration service
"""
import logging
import base64
from typing import Optional
from pathlib import Path
try:
import requests
REQUESTS_AVAILABLE = True
except ImportError:
REQUESTS_AVAILABLE = False
requests = None
from config import settings
class NotionService:
"""Service for Notion API integration"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._token: Optional[str] = None
self._database_id: Optional[str] = None
self._base_url = "https://api.notion.com/v1"
def configure(self, token: str, database_id: str) -> None:
"""Configure Notion credentials"""
self._token = token
self._database_id = database_id
self.logger.info("Notion service configured")
@property
def is_configured(self) -> bool:
"""Check if Notion is configured"""
return bool(self._token and self._database_id)
def _get_headers(self) -> dict:
"""Get headers for Notion API requests"""
return {
"Authorization": f"Bearer {self._token}",
"Content-Type": "application/json",
"Notion-Version": "2022-06-28"
}
def upload_pdf(self, pdf_path: Path, title: str) -> bool:
"""Upload PDF to Notion database"""
if not self.is_configured:
self.logger.warning("Notion not configured, skipping upload")
return False
if not REQUESTS_AVAILABLE:
self.logger.error("requests library not available for Notion upload")
return False
if not pdf_path.exists():
self.logger.error(f"PDF file not found: {pdf_path}")
return False
try:
# Read and encode PDF
with open(pdf_path, 'rb') as f:
pdf_data = base64.b64encode(f.read()).decode('utf-8')
# Prepare the page data
page_data = {
"parent": {"database_id": self._database_id},
"properties": {
"Name": {
"title": [
{
"text": {
"content": title
}
}
]
},
"Status": {
"select": {
"name": "Procesado"
}
}
},
"children": [
{
"object": "block",
"type": "paragraph",
"paragraph": {
"rich_text": [
{
"type": "text",
"text": {
"content": f"Documento generado automáticamente: {title}"
}
}
]
}
},
{
"object": "block",
"type": "file",
"file": {
"type": "external",
"external": {
"url": f"data:application/pdf;base64,{pdf_data}"
}
}
}
]
}
# Create page in database
response = requests.post(
f"{self._base_url}/pages",
headers=self._get_headers(),
json=page_data,
timeout=30
)
if response.status_code == 200:
self.logger.info(f"PDF uploaded to Notion successfully: {title}")
return True
else:
self.logger.error(f"Notion API error: {response.status_code} - {response.text}")
return False
except Exception as e:
self.logger.error(f"Error uploading PDF to Notion: {e}")
return False
def upload_pdf_as_file(self, pdf_path: Path, title: str) -> bool:
"""Upload PDF as a file block (alternative method)"""
if not self.is_configured:
self.logger.warning("Notion not configured, skipping upload")
return False
if not REQUESTS_AVAILABLE:
self.logger.error("requests library not available for Notion upload")
return False
if not pdf_path.exists():
self.logger.error(f"PDF file not found: {pdf_path}")
return False
try:
# For simplicity, we'll create a page with just the title and a link placeholder
# In a real implementation, you'd need to upload the file to Notion's file storage
page_data = {
"parent": {"database_id": self._database_id},
"properties": {
"Name": {
"title": [
{
"text": {
"content": title
}
}
]
},
"Status": {
"select": {
"name": "Procesado"
}
},
"File Path": {
"rich_text": [
{
"text": {
"content": str(pdf_path)
}
}
]
}
}
}
response = requests.post(
f"{self._base_url}/pages",
headers=self._get_headers(),
json=page_data,
timeout=30
)
if response.status_code == 200:
self.logger.info(f"PDF uploaded to Notion successfully: {title}")
return True
else:
self.logger.error(f"Notion API error: {response.status_code} - {response.text}")
return False
except Exception as e:
self.logger.error(f"Error uploading PDF to Notion: {e}")
return False
# Global instance
notion_service = NotionService()
def upload_to_notion(pdf_path: Path, title: str) -> bool:
"""Legacy function for backward compatibility"""
return notion_service.upload_pdf(pdf_path, title)

View File

@@ -0,0 +1,91 @@
"""
Telegram notification service
"""
import logging
import time
from typing import Optional
from datetime import datetime
from config import settings
try:
import requests
REQUESTS_AVAILABLE = True
except ImportError:
REQUESTS_AVAILABLE = False
class TelegramService:
"""Service for sending Telegram notifications"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._token: Optional[str] = None
self._chat_id: Optional[str] = None
self._last_error_cache: dict = {}
def configure(self, token: str, chat_id: str) -> None:
"""Configure Telegram credentials"""
self._token = token
self._chat_id = chat_id
self.logger.info("Telegram service configured")
@property
def is_configured(self) -> bool:
"""Check if Telegram is configured"""
return bool(self._token and self._chat_id)
def _send_request(self, endpoint: str, data: dict, retries: int = 3, delay: int = 2) -> bool:
"""Make API request to Telegram"""
if not REQUESTS_AVAILABLE:
self.logger.warning("requests library not available")
return False
url = f"https://api.telegram.org/bot{self._token}/{endpoint}"
for attempt in range(retries):
try:
resp = requests.post(url, data=data, timeout=10)
if resp.status_code == 200:
return True
else:
self.logger.error(f"Telegram API error: {resp.status_code}")
except Exception as e:
self.logger.error(f"Telegram request failed (attempt {attempt+1}/{retries}): {e}")
time.sleep(delay)
return False
def send_message(self, message: str) -> bool:
"""Send a text message to Telegram"""
if not self.is_configured:
self.logger.warning("Telegram not configured, skipping notification")
return False
data = {"chat_id": self._chat_id, "text": message}
return self._send_request("sendMessage", data)
def send_start_notification(self) -> bool:
"""Send service start notification"""
message = "CBCFacil Service Started - AI document processing active"
return self.send_message(message)
def send_error_notification(self, error_key: str, error_message: str) -> bool:
"""Send error notification with throttling"""
now = datetime.utcnow()
prev = self._last_error_cache.get(error_key)
if prev is None:
self._last_error_cache[error_key] = (error_message, now)
else:
prev_msg, prev_time = prev
if error_message != prev_msg or (now - prev_time).total_seconds() > settings.ERROR_THROTTLE_SECONDS:
self._last_error_cache[error_key] = (error_message, now)
else:
return False
return self.send_message(f"Error: {error_message}")
# Global instance
telegram_service = TelegramService()
def send_telegram_message(message: str, retries: int = 3, delay: int = 2) -> bool:
"""Legacy function for backward compatibility"""
return telegram_service.send_message(message)
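# Illustrative sketch (placeholder credentials):
if __name__ == "__main__":
    telegram_service.configure(token="123456:ABC-DEF...", chat_id="123456789")
    telegram_service.send_start_notification()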

172
services/vram_manager.py Normal file
View File

@@ -0,0 +1,172 @@
"""
VRAM/GPU memory management service
"""
import gc
import logging
import os
import time
from datetime import datetime, timedelta
from typing import Optional, Dict, Any
from core import BaseService
from config import settings
try:
import torch
TORCH_AVAILABLE = True
except ImportError:
TORCH_AVAILABLE = False
# Import gpu_detector after torch check
from .gpu_detector import gpu_detector, GPUType
class VRAMManager(BaseService):
"""Service for managing GPU VRAM usage"""
def __init__(self):
super().__init__("VRAMManager")
self._whisper_model = None
self._ocr_models = None
self._trocr_models = None
self._models_last_used: Optional[datetime] = None
self._cleanup_threshold = 0.7
self._cleanup_interval = 300
self._last_cleanup: Optional[datetime] = None
def initialize(self) -> None:
"""Initialize VRAM manager"""
# Initialize GPU detector first
gpu_detector.initialize()
if not TORCH_AVAILABLE:
self.logger.warning("PyTorch not available - VRAM management disabled")
return
if gpu_detector.is_available():
gpu_type = gpu_detector.gpu_type
device_name = gpu_detector.get_device_name()
if gpu_type == GPUType.AMD:
self.logger.info(f"VRAM Manager initialized with AMD ROCm: {device_name}")
elif gpu_type == GPUType.NVIDIA:
os.environ['CUDA_VISIBLE_DEVICES'] = settings.CUDA_VISIBLE_DEVICES
if settings.PYTORCH_CUDA_ALLOC_CONF:
# PyTorch reads the allocator config from this environment variable;
# there is no torch.backends attribute for it
os.environ.setdefault('PYTORCH_CUDA_ALLOC_CONF', settings.PYTORCH_CUDA_ALLOC_CONF)
self.logger.info(f"VRAM Manager initialized with NVIDIA CUDA: {device_name}")
else:
self.logger.warning("No GPU available - GPU acceleration disabled")
def cleanup(self) -> None:
"""Cleanup all GPU models"""
if not TORCH_AVAILABLE or not torch.cuda.is_available():
return
models_freed = []
if self._whisper_model is not None:
try:
del self._whisper_model
self._whisper_model = None
models_freed.append("Whisper")
except Exception as e:
self.logger.error(f"Error freeing Whisper VRAM: {e}")
if self._ocr_models is not None:
try:
self._ocr_models = None
models_freed.append("OCR")
except Exception as e:
self.logger.error(f"Error freeing OCR VRAM: {e}")
if self._trocr_models is not None:
try:
if isinstance(self._trocr_models, dict):
model = self._trocr_models.get('model')
if model is not None:
model.to('cpu')
models_freed.append("TrOCR")
torch.cuda.empty_cache()
except Exception as e:
self.logger.error(f"Error freeing TrOCR VRAM: {e}")
self._whisper_model = None
self._ocr_models = None
self._trocr_models = None
self._models_last_used = None
if models_freed:
self.logger.info(f"Freed VRAM for models: {', '.join(models_freed)}")
self._force_aggressive_cleanup()
def update_usage(self) -> None:
"""Update usage timestamp"""
self._models_last_used = datetime.utcnow()
self.logger.debug(f"VRAM usage timestamp updated")
def should_cleanup(self) -> bool:
"""Check if cleanup should be performed"""
if not TORCH_AVAILABLE or not torch.cuda.is_available():
return False
if self._last_cleanup is None:
return True
if (datetime.utcnow() - self._last_cleanup).total_seconds() < self._cleanup_interval:
return False
allocated = torch.cuda.memory_allocated(0)
total = torch.cuda.get_device_properties(0).total_memory
return allocated / total > self._cleanup_threshold
def lazy_cleanup(self) -> None:
"""Perform cleanup if needed"""
if self.should_cleanup():
self.cleanup()
self._last_cleanup = datetime.utcnow()
def _force_aggressive_cleanup(self) -> None:
"""Force aggressive VRAM cleanup"""
if not TORCH_AVAILABLE or not torch.cuda.is_available():
return
try:
before_allocated = torch.cuda.memory_allocated(0) / 1024**3
before_reserved = torch.cuda.memory_reserved(0) / 1024**3
self.logger.debug(f"Before cleanup - Allocated: {before_allocated:.2f}GB, Reserved: {before_reserved:.2f}GB")
gc.collect()  # collect all generations, not just generation 0
torch.cuda.empty_cache()
after_allocated = torch.cuda.memory_allocated(0) / 1024**3
after_reserved = torch.cuda.memory_reserved(0) / 1024**3
self.logger.debug(f"After cleanup - Allocated: {after_allocated:.2f}GB, Reserved: {after_reserved:.2f}GB")
if after_reserved < before_reserved:
self.logger.info(f"VRAM freed: {(before_reserved - after_reserved):.2f}GB")
except Exception as e:
self.logger.error(f"Error in aggressive VRAM cleanup: {e}")
def get_usage(self) -> Dict[str, Any]:
"""Get VRAM usage information"""
if not TORCH_AVAILABLE:
return {'error': 'PyTorch not available'}
if not torch.cuda.is_available():
return {'error': 'CUDA not available'}
total = torch.cuda.get_device_properties(0).total_memory / 1024**3
allocated = torch.cuda.memory_allocated(0) / 1024**3
cached = torch.cuda.memory_reserved(0) / 1024**3
free = total - allocated
return {
'total_gb': round(total, 2),
'allocated_gb': round(allocated, 2),
'cached_gb': round(cached, 2),
'free_gb': round(free, 2),
'whisper_loaded': self._whisper_model is not None,
'ocr_models_loaded': self._ocr_models is not None,
'trocr_models_loaded': self._trocr_models is not None,
'last_used': self._models_last_used.isoformat() if self._models_last_used else None,
'timeout_seconds': settings.MODEL_TIMEOUT_SECONDS
}
def force_free(self) -> str:
"""Force immediate VRAM free"""
self.cleanup()
return "VRAM freed successfully"
# Global instance
vram_manager = VRAMManager()
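# Illustrative sketch: inspect VRAM usage, then force a cleanup pass.
if __name__ == "__main__":
    vram_manager.initialize()
    print(vram_manager.get_usage())
    print(vram_manager.force_free())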

290
services/webdav_service.py Normal file
View File

@@ -0,0 +1,290 @@
"""
WebDAV service for Nextcloud integration
"""
import logging
import os
import time
import unicodedata
import re
from pathlib import Path
from typing import Optional, List, Dict, Tuple
from contextlib import contextmanager
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from requests.auth import HTTPBasicAuth
from requests.adapters import HTTPAdapter
from config import settings
from core import WebDAVError
class WebDAVService:
"""Service for WebDAV operations with Nextcloud"""
def __init__(self):
self.session: Optional[requests.Session] = None
self.logger = logging.getLogger(__name__)
self._retry_delay = 1
self._max_retries = settings.WEBDAV_MAX_RETRIES
def initialize(self) -> None:
"""Initialize WebDAV session"""
if not settings.has_webdav_config:
raise WebDAVError("WebDAV credentials not configured")
self.session = requests.Session()
self.session.auth = HTTPBasicAuth(settings.NEXTCLOUD_USER, settings.NEXTCLOUD_PASSWORD)
# Configure HTTP adapter with retry strategy
adapter = HTTPAdapter(
max_retries=0, # We'll handle retries manually
pool_connections=10,
pool_maxsize=20
)
self.session.mount('https://', adapter)
self.session.mount('http://', adapter)
# Test connection
try:
self._request('GET', '', timeout=5)
self.logger.info("WebDAV connection established")
except Exception as e:
raise WebDAVError(f"Failed to connect to WebDAV: {e}")
def cleanup(self) -> None:
"""Cleanup WebDAV session"""
if self.session:
self.session.close()
self.session = None
@staticmethod
def normalize_path(path: str) -> str:
"""Normalize remote paths to a consistent representation"""
if not path:
return ""
normalized = unicodedata.normalize("NFC", str(path)).strip()
if not normalized:
return ""
normalized = normalized.replace("\\", "/")
normalized = re.sub(r"/+", "/", normalized)
return normalized.lstrip("/")
def _build_url(self, remote_path: str) -> str:
"""Build WebDAV URL"""
path = self.normalize_path(remote_path)
base_url = settings.WEBDAV_ENDPOINT.rstrip('/')
return f"{base_url}/{path}"
def _request(self, method: str, remote_path: str, **kwargs) -> requests.Response:
"""Make HTTP request to WebDAV with retries"""
if not self.session:
raise WebDAVError("WebDAV session not initialized")
url = self._build_url(remote_path)
timeout = kwargs.pop('timeout', settings.HTTP_TIMEOUT)
for attempt in range(self._max_retries):
try:
response = self.session.request(method, url, timeout=timeout, **kwargs)
if response.status_code < 400:
return response
elif response.status_code == 404:
raise WebDAVError(f"Resource not found: {remote_path}")
else:
raise WebDAVError(f"HTTP {response.status_code}: {response.text}")
except (requests.RequestException, requests.Timeout) as e:
if attempt == self._max_retries - 1:
raise WebDAVError(f"Request failed after {self._max_retries} retries: {e}")
delay = self._retry_delay * (2 ** attempt)
self.logger.warning(f"Request failed (attempt {attempt + 1}/{self._max_retries}), retrying in {delay}s...")
time.sleep(delay)
raise WebDAVError("Max retries exceeded")
def list(self, remote_path: str = "") -> List[str]:
"""List files in remote directory"""
self.logger.debug(f"Listing remote directory: {remote_path}")
response = self._request('PROPFIND', remote_path, headers={'Depth': '1'})
return self._parse_propfind_response(response.text)
def _parse_propfind_response(self, xml_response: str) -> List[str]:
"""Parse PROPFIND XML response and return only files (not directories)"""
# Simple parser for PROPFIND response
files = []
try:
import xml.etree.ElementTree as ET
from urllib.parse import urlparse, unquote
root = ET.fromstring(xml_response)
# Get the WebDAV path from settings
parsed_url = urlparse(settings.NEXTCLOUD_URL)
webdav_path = parsed_url.path.rstrip('/') # e.g. /remote.php/webdav
# Find all response elements
for response in root.findall('.//{DAV:}response'):
href = response.find('.//{DAV:}href')
if href is None or href.text is None:
continue
href_text = unquote(href.text) # Decode URL encoding
# Check if this is a directory (has collection resourcetype)
propstat = response.find('.//{DAV:}propstat')
is_directory = False
if propstat is not None:
prop = propstat.find('.//{DAV:}prop')
if prop is not None:
resourcetype = prop.find('.//{DAV:}resourcetype')
if resourcetype is not None and resourcetype.find('.//{DAV:}collection') is not None:
is_directory = True
# Skip directories
if is_directory:
continue
# Also skip paths ending with / (another way to detect directories)
if href_text.endswith('/'):
continue
# Remove base URL from href
base_url = settings.NEXTCLOUD_URL.rstrip('/')
if href_text.startswith(base_url):
href_text = href_text[len(base_url):]
# Also strip the webdav path if it's there
if href_text.startswith(webdav_path):
href_text = href_text[len(webdav_path):]
# Clean up the path
href_text = href_text.lstrip('/')
if href_text: # Skip empty paths (root directory)
files.append(href_text)
except Exception as e:
self.logger.error(f"Error parsing PROPFIND response: {e}")
return files
def download(self, remote_path: str, local_path: Path) -> None:
"""Download file from WebDAV"""
self.logger.info(f"Downloading {remote_path} to {local_path}")
# Ensure local directory exists
local_path.parent.mkdir(parents=True, exist_ok=True)
response = self._request('GET', remote_path, stream=True)
# Use larger buffer size for better performance
with open(local_path, 'wb', buffering=65536) as f:
for chunk in response.iter_content(chunk_size=settings.DOWNLOAD_CHUNK_SIZE):
if chunk:
f.write(chunk)
self.logger.debug(f"Download completed: {local_path}")
def upload(self, local_path: Path, remote_path: str) -> None:
"""Upload file to WebDAV"""
self.logger.info(f"Uploading {local_path} to {remote_path}")
# Ensure remote directory exists
remote_dir = self.normalize_path(remote_path)
if '/' in remote_dir:
dir_path = '/'.join(remote_dir.split('/')[:-1])
self.makedirs(dir_path)
with open(local_path, 'rb') as f:
self._request('PUT', remote_path, data=f)
self.logger.debug(f"Upload completed: {remote_path}")
def mkdir(self, remote_path: str) -> None:
"""Create directory on WebDAV"""
self.makedirs(remote_path)
def makedirs(self, remote_path: str) -> None:
"""Create directory and parent directories on WebDAV"""
path = self.normalize_path(remote_path)
if not path:
return
parts = path.split('/')
current = ""
for part in parts:
current = f"{current}/{part}" if current else part
try:
self._request('MKCOL', current)
self.logger.debug(f"Created directory: {current}")
except WebDAVError as e:
# Directory might already exist (409 Conflict or 405 MethodNotAllowed is OK)
if '409' not in str(e) and '405' not in str(e):
raise
def delete(self, remote_path: str) -> None:
"""Delete file or directory from WebDAV"""
self.logger.info(f"Deleting remote path: {remote_path}")
self._request('DELETE', remote_path)
def exists(self, remote_path: str) -> bool:
"""Check if remote path exists"""
try:
self._request('HEAD', remote_path)
return True
except WebDAVError:
return False
def upload_batch(
self,
files: List[Tuple[Path, str]],
max_workers: int = 4,
timeout: int = 120
) -> Dict[str, bool]:
"""
Upload multiple files concurrently.
Args:
files: List of (local_path, remote_path) tuples
max_workers: Maximum concurrent uploads
timeout: Overall timeout for collecting all upload results, in seconds
Returns:
Dict mapping remote_path to success status
"""
if not files:
return {}
results: Dict[str, bool] = {}
with ThreadPoolExecutor(max_workers=max_workers) as executor:
# Submit all upload tasks
future_to_path = {
executor.submit(self.upload, local, remote): remote
for local, remote in files
}
# Collect results as they complete
for future in as_completed(future_to_path, timeout=timeout):
remote_path = future_to_path[future]
try:
future.result()
results[remote_path] = True
self.logger.info(f"Successfully uploaded: {remote_path}")
except Exception as e:
results[remote_path] = False
self.logger.error(f"Failed to upload {remote_path}: {e}")
failed_count = sum(1 for success in results.values() if not success)
if failed_count > 0:
self.logger.warning(
f"Batch upload completed with {failed_count} failures "
f"({len(results) - failed_count}/{len(results)} successful)"
)
else:
self.logger.info(
f"Batch upload completed: {len(results)} files uploaded successfully"
)
return results
# Global instance
webdav_service = WebDAVService()
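# Illustrative sketch (placeholder paths; requires valid Nextcloud credentials
# in the environment):
if __name__ == "__main__":
    webdav_service.initialize()
    print(webdav_service.list(""))
    results = webdav_service.upload_batch([
        (Path("downloads/resumen.pdf"), "resumenes/resumen.pdf"),
    ])
    print(results)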

165
setup.py Normal file
View File

@@ -0,0 +1,165 @@
#!/usr/bin/env python3
"""
Setup script for CBCFacil
"""
import os
import sys
import subprocess
import platform
from pathlib import Path
def check_python_version():
"""Check if Python version is 3.10 or higher"""
if sys.version_info < (3, 10):
print("❌ Error: Python 3.10 or higher is required")
print(f" Current version: {sys.version}")
sys.exit(1)
print(f"✓ Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}")
def check_system_dependencies():
"""Check and install system dependencies"""
system = platform.system().lower()
print("\n📦 Checking system dependencies...")
if system == "linux":
# Check for CUDA (optional)
if os.path.exists("/usr/local/cuda"):
print("✓ CUDA found")
else:
print("⚠ CUDA not found - GPU acceleration will be disabled")
# Check for tesseract
try:
subprocess.run(["tesseract", "--version"], check=True, capture_output=True)
print("✓ Tesseract OCR installed")
except (subprocess.CalledProcessError, FileNotFoundError):
print("⚠ Tesseract OCR not found - PDF processing may not work")
# Check for ffmpeg
try:
subprocess.run(["ffmpeg", "-version"], check=True, capture_output=True)
print("✓ FFmpeg installed")
except (subprocess.CalledProcessError, FileNotFoundError):
print("⚠ FFmpeg not found - audio processing may not work")
elif system == "darwin": # macOS
# Check for tesseract
try:
subprocess.run(["brew", "list", "tesseract"], check=True, capture_output=True)
print("✓ Tesseract OCR installed")
except (subprocess.CalledProcessError, FileNotFoundError):
print("⚠ Tesseract not found. Install with: brew install tesseract")
print()
def create_virtual_environment():
"""Create Python virtual environment"""
venv_path = Path("venv")
if venv_path.exists():
print("✓ Virtual environment already exists")
return venv_path
print("📦 Creating virtual environment...")
subprocess.run([sys.executable, "-m", "venv", "venv"], check=True)
print("✓ Virtual environment created")
return venv_path
def install_requirements(venv_path):
"""Install Python requirements"""
pip_path = venv_path / ("Scripts" if platform.system() == "Windows" else "bin") / "pip"
print("📦 Installing Python requirements...")
subprocess.run([str(pip_path), "install", "--upgrade", "pip"], check=True)
subprocess.run([str(pip_path), "install", "-r", "requirements.txt"], check=True)
print("✓ Python requirements installed")
def create_directories():
"""Create necessary directories"""
directories = [
"downloads",
"resumenes_docx",
"logs",
"processed"
]
print("\n📁 Creating directories...")
for directory in directories:
Path(directory).mkdir(exist_ok=True)
print(f"{directory}")
print()
def create_env_file():
"""Create .env file if it doesn't exist"""
env_path = Path(".env")
example_path = Path(".env.example")
if env_path.exists():
print("✓ .env file already exists")
return
if not example_path.exists():
print("⚠ .env.example not found")
return
print("\n📝 Creating .env file from template...")
print(" Please edit .env file and add your API keys")
with open(example_path, "r") as src:
content = src.read()
with open(env_path, "w") as dst:
dst.write(content)
print("✓ .env file created from .env.example")
print(" ⚠ Please edit .env and add your API keys!")
def main():
"""Main setup function"""
print("=" * 60)
print("CBCFacil Setup Script")
print("=" * 60)
print()
# Check Python version
check_python_version()
# Check system dependencies
check_system_dependencies()
# Create virtual environment
venv_path = create_virtual_environment()
# Install requirements
install_requirements(venv_path)
# Create directories
create_directories()
# Create .env file
create_env_file()
print("\n" + "=" * 60)
print("✓ Setup complete!")
print("=" * 60)
print("\nNext steps:")
print(" 1. Edit .env file and add your API keys")
print(" 2. Run: source venv/bin/activate (Linux/macOS)")
print(" or venv\\Scripts\\activate (Windows)")
print(" 3. Run: python main_refactored.py")
print("\nFor dashboard only:")
print(" python -c \"from api.routes import create_app; app = create_app(); app.run(port=5000)\"")
print()
if __name__ == "__main__":
main()

7
storage/__init__.py Normal file
View File

@@ -0,0 +1,7 @@
"""
Storage package for CBCFacil
"""
from .processed_registry import ProcessedRegistry
__all__ = ['ProcessedRegistry']

235
storage/processed_registry.py Normal file
View File

@@ -0,0 +1,235 @@
"""
Processed files registry - Optimized version with bloom filter and better caching
"""
import fcntl
import logging
import time
from pathlib import Path
from typing import Set, Optional
from datetime import datetime, timedelta
from config import settings
class BloomFilter:
"""Simple Bloom Filter for fast membership testing"""
def __init__(self, size: int = 10000, hash_count: int = 3):
self.size = size
self.hash_count = hash_count
self.bit_array = [0] * size
def _hashes(self, item: str) -> list[int]:
"""Generate hash positions for item"""
import hashlib
digest = hashlib.md5(item.encode()).digest()
return [
int.from_bytes(digest[i:i+4], 'big') % self.size
for i in range(0, min(self.hash_count * 4, len(digest)), 4)
]
def add(self, item: str) -> None:
for pos in self._hashes(item):
self.bit_array[pos] = 1
def might_contain(self, item: str) -> bool:
return all(self.bit_array[pos] for pos in self._hashes(item))
class ProcessedRegistry:
"""Registry for tracking processed files with caching and file locking"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self._cache: Set[str] = set()
self._cache_time: Optional[float] = None
self._cache_ttl = 300  # 5 minutes (previously 60s)
self._initialized = False
self._bloom_filter = BloomFilter(size=10000, hash_count=3)
self._write_lock = False  # Reserved for write batching (currently unused)
def initialize(self) -> None:
"""Initialize the registry"""
self.load()
self._initialized = True
self.logger.info(f"Processed registry initialized ({self.count()} files)")
def load(self) -> Set[str]:
"""Load processed files from disk with caching"""
now = time.time()
# Return cached data if still valid
if self._cache and self._cache_time:
age = now - self._cache_time
if age < self._cache_ttl:
return self._cache # Return reference, not copy for read-only
processed = set()
registry_path = settings.processed_files_path
try:
registry_path.parent.mkdir(parents=True, exist_ok=True)
if registry_path.exists():
with open(registry_path, 'r', encoding='utf-8') as f:
for raw_line in f:
line = raw_line.strip()
if line and not line.startswith('#'):
processed.add(line)
# Add basename for both path and basename lookups
base_name = Path(line).name
processed.add(base_name)
# Update bloom filter
self._bloom_filter.add(line)
self._bloom_filter.add(base_name)
except Exception as e:
self.logger.error(f"Error reading processed files registry: {e}")
self._cache = processed
self._cache_time = now
return processed # Return reference, not copy
def save(self, file_path: str) -> None:
"""Add file to processed registry with file locking"""
if not file_path:
return
registry_path = settings.processed_files_path
try:
registry_path.parent.mkdir(parents=True, exist_ok=True)
# Check cache first
if file_path in self._cache:
return
# Append to file
with open(registry_path, 'a', encoding='utf-8') as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
try:
f.write(file_path + "\n")
# Update in-memory structures
self._cache.add(file_path)
self._bloom_filter.add(file_path)
self._cache_time = time.time()
self.logger.debug(f"Added {file_path} to processed registry")
finally:
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
except Exception as e:
self.logger.error(f"Error saving to processed files registry: {e}")
raise
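    # Note on the locking above: fcntl.flock() is advisory and POSIX-only, so
    # the append is only safe if every other writer takes LOCK_EX on the same
    # registry file as well.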
def is_processed(self, file_path: str) -> bool:
"""Check if file has been processed - O(1) with bloom filter"""
if not self._initialized:
self.initialize()
# Fast bloom filter check first
if not self._bloom_filter.might_contain(file_path):
return False
# Check cache (O(1) for both full path and basename)
if file_path in self._cache:
return True
basename = Path(file_path).name
if basename in self._cache:
return True
return False
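    # Illustrative lookups (hypothetical paths). Both forms resolve to the
    # same entry, because load() indexes full paths and basenames alike:
    #   processed_registry.is_processed("downloads/clase_01.mp3")
    #   processed_registry.is_processed("clase_01.mp3")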
def save_batch(self, file_paths: list[str]) -> int:
"""Add multiple files to registry efficiently"""
saved_count = 0
try:
registry_path = settings.processed_files_path
registry_path.parent.mkdir(parents=True, exist_ok=True)
with open(registry_path, 'a', encoding='utf-8') as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
try:
lines_to_write = []
for file_path in file_paths:
if file_path and file_path not in self._cache:
lines_to_write.append(file_path + "\n")
self._cache.add(file_path)
self._bloom_filter.add(file_path)
saved_count += 1
if lines_to_write:
f.writelines(lines_to_write)
self._cache_time = time.time()
self.logger.debug(f"Added {saved_count} files to processed registry")
finally:
fcntl.flock(f.fileno(), fcntl.LOCK_UN)
except Exception as e:
self.logger.error(f"Error saving batch to processed files registry: {e}")
return saved_count
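    # Illustrative batch call (hypothetical paths); the return value counts
    # only the entries that were actually new:
    #   added = processed_registry.save_batch(
    #       ["downloads/clase_01.mp3", "downloads/clase_02.mp3"]
    #   )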
    def remove(self, file_path: str) -> bool:
        """Remove file from processed registry"""
        registry_path = settings.processed_files_path
        try:
            if not registry_path.exists():
                return False
            # Read, filter and rewrite under one exclusive lock, so a
            # concurrent writer never sees a half-truncated file
            with open(registry_path, 'r+', encoding='utf-8') as f:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX)
                try:
                    target_name = Path(file_path).name
                    lines_to_keep = [
                        line for line in f
                        if line.strip() != file_path
                        and Path(line.strip()).name != target_name
                    ]
                    f.seek(0)
                    f.writelines(lines_to_keep)
                    f.truncate()
                    self._cache.discard(file_path)
                    self._cache.discard(target_name)
                    # Bloom filters cannot delete items, so rebuild from the cache
                    self._bloom_filter = BloomFilter(size=10000, hash_count=3)
                    for item in self._cache:
                        self._bloom_filter.add(item)
                finally:
                    fcntl.flock(f.fileno(), fcntl.LOCK_UN)
            return True
        except Exception as e:
            self.logger.error(f"Error removing from processed files registry: {e}")
            return False
def clear(self) -> None:
"""Clear the entire registry"""
registry_path = settings.processed_files_path
try:
if registry_path.exists():
registry_path.unlink()
self._cache.clear()
self._cache_time = None
self._bloom_filter = BloomFilter(size=10000, hash_count=3)
self.logger.info("Processed files registry cleared")
except Exception as e:
self.logger.error(f"Error clearing processed files registry: {e}")
raise
def get_all(self) -> Set[str]:
"""Get all processed files"""
if not self._initialized:
self.initialize()
return self._cache.copy()
def count(self) -> int:
"""Get count of processed files"""
if not self._initialized:
self.initialize()
return len(self._cache)
def get_stats(self) -> dict:
"""Get registry statistics"""
return {
"total_files": len(self._cache),
"cache_age_seconds": time.time() - self._cache_time if self._cache_time else 0,
"cache_ttl_seconds": self._cache_ttl,
"bloom_filter_size": self._bloom_filter.size,
"initialized": self._initialized
}
# Global instance
processed_registry = ProcessedRegistry()
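# Typical consumer flow (a sketch; process(path) stands in for the real
# pipeline step and new_files for whatever listing the caller uses):
#   from storage.processed_registry import processed_registry
#   for path in new_files:
#       if not processed_registry.is_processed(path):
#           process(path)
#           processed_registry.save(path)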

2682
templates/index.html Normal file

File diff suppressed because it is too large

BIN
test_audio/imperio.mp3 Normal file

Binary file not shown.

3
tests/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
"""
Test package for CBCFacil
"""

20
tests/conftest.py Normal file
View File

@@ -0,0 +1,20 @@
"""
Pytest configuration for CBCFacil tests
Ensures proper Python path for module imports
"""
import sys
import os
from pathlib import Path
# Add project root to path
PROJECT_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
# Set environment variables for testing
os.environ.setdefault('LOCAL_STATE_DIR', str(PROJECT_ROOT / 'state'))
os.environ.setdefault('LOCAL_DOWNLOADS_PATH', str(PROJECT_ROOT / 'downloads'))
# Disable external service connections during tests
os.environ.setdefault('NEXTCLOUD_URL', '')
os.environ.setdefault('ANTHROPIC_AUTH_TOKEN', '')
os.environ.setdefault('GEMINI_API_KEY', '')
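# A possible fixture for isolated registry tests (an assumption, not part of
# this conftest as committed): point LOCAL_STATE_DIR at a per-test directory.
#   import pytest
#   @pytest.fixture
#   def isolated_state(tmp_path, monkeypatch):
#       monkeypatch.setenv('LOCAL_STATE_DIR', str(tmp_path))
#       return tmp_path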

112
verify_improvements.py Executable file
View File

@@ -0,0 +1,112 @@
#!/usr/bin/env python3
"""
Script de verificación para mejoras de la Fase 3
Verifica que todas las mejoras están correctamente implementadas
"""
import sys
import os
from pathlib import Path
def check_file_exists(filepath, description):
    """Check that a file exists"""
    if Path(filepath).exists():
        print(f"✅ {description}: {filepath}")
        return True
    else:
        print(f"❌ {description}: {filepath} NOT FOUND")
        return False
def check_function_in_file(filepath, function_name, description):
    """Check that a function exists in a file"""
    try:
        with open(filepath, 'r') as f:
            content = f.read()
        if function_name in content:
            print(f"✅ {description}: found '{function_name}'")
            return True
        else:
            print(f"❌ {description}: '{function_name}' NOT found")
            return False
    except Exception as e:
        print(f"❌ Error reading {filepath}: {e}")
        return False
def check_class_in_file(filepath, class_name, description):
    """Check that a class exists in a file"""
    return check_function_in_file(filepath, f"class {class_name}", description)
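# Example: the helper above reuses the plain substring check, so
#   check_class_in_file("core/health_check.py", "HealthChecker", "HealthChecker class")
# just searches the file for the literal text "class HealthChecker".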
def main():
    print("=" * 70)
    print("🔍 PHASE 3 IMPROVEMENTS VERIFICATION - CBCFacil")
    print("=" * 70)
    print()
    results = []
    # 1. Check created/modified files
    print("📁 FILES:")
    print("-" * 70)
    results.append(check_file_exists("main.py", "main.py modified"))
    results.append(check_file_exists("config/settings.py", "config/settings.py modified"))
    results.append(check_file_exists("core/health_check.py", "core/health_check.py created"))
    results.append(check_file_exists("IMPROVEMENTS_LOG.md", "IMPROVEMENTS_LOG.md created"))
    print()
    # 2. Check improvements in main.py
    print("🔧 IMPROVEMENTS IN main.py:")
    print("-" * 70)
    results.append(check_function_in_file("main.py", "logger.exception", "logger.exception() implemented"))
    results.append(check_function_in_file("main.py", "class JSONFormatter", "JSONFormatter implemented"))
    results.append(check_function_in_file("main.py", "def validate_configuration", "validate_configuration() implemented"))
    results.append(check_function_in_file("main.py", "def check_service_health", "check_service_health() implemented"))
    results.append(check_function_in_file("main.py", "def send_error_notification", "send_error_notification() implemented"))
    results.append(check_function_in_file("main.py", "def setup_logging", "setup_logging() implemented"))
    print()
    # 3. Check improvements in config/settings.py
    print("⚙️ IMPROVEMENTS IN config/settings.py:")
    print("-" * 70)
    results.append(check_function_in_file("config/settings.py", "class ConfigurationError", "ConfigurationError defined"))
    results.append(check_function_in_file("config/settings.py", "def nextcloud_url", "nextcloud_url property with validation"))
    results.append(check_function_in_file("config/settings.py", "def valid_webdav_config", "valid_webdav_config property"))
    results.append(check_function_in_file("config/settings.py", "def telegram_configured", "telegram_configured property"))
    results.append(check_function_in_file("config/settings.py", "def has_gpu_support", "has_gpu_support property"))
    results.append(check_function_in_file("config/settings.py", "def config_summary", "config_summary property"))
    print()
    # 4. Check core/health_check.py
    print("❤️ HEALTH CHECKS:")
    print("-" * 70)
    results.append(check_function_in_file("core/health_check.py", "class HealthChecker", "HealthChecker class"))
    results.append(check_function_in_file("core/health_check.py", "def check_webdav_connection", "check_webdav_connection()"))
    results.append(check_function_in_file("core/health_check.py", "def check_ai_providers", "check_ai_providers()"))
    results.append(check_function_in_file("core/health_check.py", "def check_vram_manager", "check_vram_manager()"))
    results.append(check_function_in_file("core/health_check.py", "def check_disk_space", "check_disk_space()"))
    results.append(check_function_in_file("core/health_check.py", "def run_full_health_check", "run_full_health_check()"))
    print()
    # Summary
    print("=" * 70)
    print("📊 SUMMARY:")
    print("=" * 70)
    passed = sum(results)
    total = len(results)
    percentage = (passed / total) * 100
    print(f"Checks passed: {passed}/{total} ({percentage:.1f}%)")
    print()
    if percentage == 100:
        print("🎉 ALL IMPROVEMENTS ARE CORRECTLY IMPLEMENTED!")
        print()
        print("You can try:")
        print("  python main.py health")
        print()
        return 0
    else:
        print("⚠️ Some checks failed")
        print()
        return 1
if __name__ == "__main__":
sys.exit(main())

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
"""
Script para verificar y configurar permisos de Notion
"""
import sys
import logging
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from config import settings
from notion_client import Client
logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
def main():
print("\n" + "=" * 60)
print("🔧 VERIFICACIÓN DE PERMISOS DE NOTION")
print("=" * 60 + "\n")
# Configuración
token = settings.NOTION_API_TOKEN
database_id = settings.NOTION_DATABASE_ID
if not token or not database_id:
print("❌ Falta configuración de Notion en .env")
print(f" NOTION_API: {'' if token else ''}")
print(f" NOTION_DATABASE_ID: {'' if database_id else ''}")
return
print(f"✅ Token configurado: {token[:20]}...")
print(f"✅ Database ID: {database_id}\n")
# Crear cliente
client = Client(auth=token)
print("📋 PASOS PARA CONFIGURAR LOS PERMISOS:\n")
print("1. Abre Notion y ve a tu base de datos 'CBC'")
print(f" URL: https://www.notion.so/{database_id}")
print("\n2. Click en los 3 puntos (⋯) en la esquina superior derecha")
print("\n3. Selecciona 'Connections' o 'Añadir conexiones'")
print("\n4. Busca tu integración y actívala")
print(f" (Debería aparecer con el nombre que le pusiste)")
print("\n5. Confirma los permisos\n")
print("-" * 60)
print("\n🧪 Intentando conectar con Notion...\n")
    try:
        # Try to retrieve the database
        database = client.databases.retrieve(database_id=database_id)
        print("✅ SUCCESS! The integration can access the database")
        print("\n📊 Database information:")
        print(
            f"   Title: {database['title'][0]['plain_text'] if database.get('title') else 'Untitled'}"
        )
        print(f"   ID: {database['id']}")
        print("\n   Available properties:")
        for prop_name, prop_data in database.get("properties", {}).items():
            prop_type = prop_data.get("type", "unknown")
            print(f"   - {prop_name}: {prop_type}")
        print("\n" + "=" * 60)
        print("✅ EVERYTHING IS SET UP CORRECTLY")
        print("=" * 60 + "\n")
        print("🚀 Now run: python test_notion_integration.py")
        print("   to try uploading a document\n")
    except Exception as e:
        error_msg = str(e)
        print("❌ ERROR CONNECTING TO NOTION\n")
        print(f"Error: {error_msg}\n")
        if "Could not find database" in error_msg:
            print("⚠️ THE DATABASE IS NOT SHARED WITH YOUR INTEGRATION")
            print("\nFollow the steps above to share the database.")
        elif "Unauthorized" in error_msg or "401" in error_msg:
            print("⚠️ THE API TOKEN IS INVALID")
            print("\nCheck that the token in .env is correct")
        else:
            print("⚠️ UNKNOWN ERROR")
            print(f"\nDetails: {error_msg}")
        print("\n" + "=" * 60 + "\n")
if __name__ == "__main__":
main()
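# Illustrative follow-up once access works (the property name "Name" is an
# assumption; the real upload flow lives in test_notion_integration.py):
#   client.pages.create(
#       parent={"database_id": database_id},
#       properties={"Name": {"title": [{"text": {"content": "Resumen de clase"}}]}},
#   )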