Refactor: streamline core service and clean workspace
This commit is contained in:
17
main.py
17
main.py
@@ -103,8 +103,6 @@ DEFAULT_GEMINI_API_KEY = "AIzaSyDWOgyAJqscuPU6iSpS6gxupWBm4soNw5o"
# NOTE(review): a real-looking Gemini API key is committed in plain text here.
# Revoke/rotate this key and load it exclusively from the environment.
|
||||
# Gemini API key: the environment variable takes precedence; otherwise the
# baked-in default is used.
# NOTE(review): falling back to a hard-coded key is a credential-exposure
# risk — prefer failing fast when GEMINI_API_KEY is absent. TODO confirm.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") or DEFAULT_GEMINI_API_KEY
|
||||
# Telegram bot token; None when the env var is unset.
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
|
||||
# Telegram chat the bot posts to; None when the env var is unset.
TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID")
|
||||
# Base URL of the Ollama server; the default hostname "ollama" is
# presumably a container/service name — verify against the deployment.
OLLAMA_HOST = os.environ.get("OLLAMA_HOST", "http://ollama:11434")
|
||||
# Chat model requested from Ollama.
OLLAMA_MODEL = "mistral:7b"
|
||||
# Absolute paths of the gemini/claude CLI binaries, or None if not on PATH.
GEMINI_CLI_PATH = shutil.which("gemini")
|
||||
CLAUDE_CLI_PATH = shutil.which("claude")
|
||||
# Optional override for the Gemini "flash" model name; None when unset.
GEMINI_FLASH_MODEL = os.getenv("GEMINI_FLASH_MODEL")
|
||||
@@ -1209,21 +1207,6 @@ def run_gemini_summary(prompt):
|
||||
"""Genera resumen usando GLM-4.6 (compatibilidad)."""
|
||||
return run_gemini(prompt, use_flash=True)
|
||||
|
||||
def run_ollama(prompt):
    """Generate content via the local Ollama chat endpoint.

    Sends *prompt* as a single user message to ``OLLAMA_MODEL`` on
    ``OLLAMA_HOST`` and returns the model's reply text. Any failure
    (network error, HTTP error, unexpected payload shape) is reported
    as an ``"Error Ollama: ..."`` string rather than raised, matching
    the best-effort convention of the other run_* helpers.
    """
    request_body = {
        "model": OLLAMA_MODEL,
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
    }
    try:
        reply = requests.post(
            f"{OLLAMA_HOST}/api/chat", json=request_body, timeout=120
        )
        reply.raise_for_status()
        # Non-streaming response: the full answer sits in message.content.
        return reply.json()['message']['content']
    except Exception as e:
        return f"Error Ollama: {e}"
|
||||
|
||||
# --- INTELLIGENT CONTENT CLASSIFICATION ---
|
||||
def classify_content_intelligent(text_content):
|
||||
"""Clasifica el contenido del resumen en categorías temáticas usando IA"""
|
||||
|
||||
Reference in New Issue
Block a user