Major improvements to AI content generation: ## New Components (app/services/ai/) - PromptLibrary: YAML-based prompt templates with inheritance - ContextEngine: Anti-repetition and best performers tracking - ContentGeneratorV2: Enhanced generation with dynamic parameters - PlatformAdapter: Platform-specific content adaptation - ContentValidator: AI-powered quality scoring (0-100) ## Prompt Library (app/prompts/) - 3 personalities: default, educational, promotional - 5 templates: tip_tech, product_post, service_post, thread, response - 4 platform configs: x, threads, instagram, facebook - Few-shot examples by category: ia, productividad, seguridad ## Database Changes - New table: content_memory (tracks generated content) - New columns in posts: quality_score, score_breakdown, generation_attempts ## New API Endpoints (/api/v2/generate/) - POST /generate - Generation with quality check - POST /generate/batch - Batch generation - POST /quality/evaluate - Evaluate content quality - GET /templates, /personalities, /platforms - List configs ## Celery Tasks - update_engagement_scores (every 6h) - cleanup_old_memory (monthly) - refresh_best_posts_yaml (weekly) ## Tests - Comprehensive tests for all AI engine components Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
520 lines
16 KiB
Python
520 lines
16 KiB
Python
"""
|
|
ContextEngine - Motor de contexto para generación inteligente.
|
|
|
|
Este módulo maneja:
|
|
- Anti-repetición de temas y frases
|
|
- Selección de best performers para few-shot learning
|
|
- Ventana de memoria de posts recientes
|
|
- Análisis semántico de contenido
|
|
"""
|
|
|
|
import re
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple

from sqlalchemy import desc, func
from sqlalchemy.orm import Session

from app.models.content_memory import ContentMemory
from app.models.post import Post
|
|
|
|
|
|
class ContextEngine:
    """
    Context engine for content generation.

    Responsibilities:
    1. Track generated content to avoid repetition.
    2. Identify successful posts and reuse them as few-shot examples.
    3. Suggest hook/style variations based on recent usage.
    """

    # Default hook rotation, used when the caller supplies no platform-specific
    # preference list. Stored as a tuple so the default is immutable.
    _DEFAULT_HOOKS: Tuple[str, ...] = (
        "pregunta_retórica",
        "dato_impactante",
        "tip_directo",
        "afirmación_bold",
        "historia_corta",
    )

    # Keyword lists for rule-based topic detection. Matching uses word
    # boundaries (see _extract_topics) so short keywords like "ia" do not
    # false-positive inside words such as "gracias" or "media".
    _TOPIC_KEYWORDS: Dict[str, List[str]] = {
        "ia": ["ia", "inteligencia artificial", "chatgpt", "claude", "deepseek", "llm"],
        "productividad": ["productividad", "tiempo", "eficiencia", "organización", "tareas"],
        "python": ["python", "django", "flask", "pip"],
        "seguridad": ["seguridad", "password", "contraseña", "phishing", "privacidad"],
        "automatización": ["automatización", "automatizar", "script", "workflow"],
        "hardware": ["laptop", "computadora", "impresora", "monitor", "teclado"],
    }

    # Pre-compiled patterns for distinctive phrases ("la regla X", "90% ...").
    # Compiled once at class-creation time instead of on every call.
    _PHRASE_PATTERNS = [
        re.compile(r"la regla [\w\-]+"),
        re.compile(r"el método [\w\-]+"),
        re.compile(r"el truco [\w\-]+"),
        re.compile(r"la técnica [\w\-]+"),
        re.compile(r"\d+[%] [\w\s]{5,20}"),  # e.g. "90% de developers..."
    ]

    def __init__(
        self,
        memory_window: int = 50,
        topic_cooldown_days: int = 7,
        phrase_cooldown_days: int = 14,
        top_percentile: int = 20,
    ):
        """
        Initialize the Context Engine.

        Args:
            memory_window: Number of recent posts to remember.
            topic_cooldown_days: Days before a topic may be repeated.
            phrase_cooldown_days: Days before a distinctive phrase may repeat.
            top_percentile: Percentile cut-off for "top performer" status.
        """
        self.memory_window = memory_window
        self.topic_cooldown_days = topic_cooldown_days
        self.phrase_cooldown_days = phrase_cooldown_days
        self.top_percentile = top_percentile

    # === Anti-repetition ===

    def _recent_list_values(
        self,
        db: "Session",
        attr_name: str,
        days: int,
        limit: int,
    ) -> List[str]:
        """
        Collect and de-duplicate a list-valued ContentMemory column over a window.

        Shared implementation for get_recent_topics / get_recent_phrases.

        Args:
            db: Database session.
            attr_name: Name of a list-typed ContentMemory column
                ("topics" or "key_phrases").
            days: Look-back window in days.
            limit: Maximum number of memory rows to scan.

        Returns:
            De-duplicated values, preserving most-recent-first order of
            appearance (deterministic, unlike the former list(set(...))).
        """
        # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12+;
        # kept here assuming created_at stores naive UTC timestamps — confirm
        # before switching to timezone-aware datetimes.
        since = datetime.utcnow() - timedelta(days=days)
        column = getattr(ContentMemory, attr_name)

        memories = (
            db.query(ContentMemory)
            .filter(ContentMemory.created_at >= since, column.isnot(None))
            .order_by(desc(ContentMemory.created_at))
            .limit(limit)
            .all()
        )

        collected: List[str] = []
        for mem in memories:
            values = getattr(mem, attr_name)
            if values:
                collected.extend(values)

        # dict.fromkeys keeps first-seen order, so downstream truncation
        # ([:10], [:5]) always picks the most recent items.
        return list(dict.fromkeys(collected))

    def get_recent_topics(
        self,
        db: "Session",
        days: Optional[int] = None,
        limit: int = 100,
    ) -> List[str]:
        """
        Get topics used recently.

        Args:
            db: Database session.
            days: Look-back window in days (default: topic_cooldown_days).
            limit: Maximum number of records to scan.

        Returns:
            De-duplicated list of recently used topics, most recent first.
        """
        if days is None:
            days = self.topic_cooldown_days
        return self._recent_list_values(db, "topics", days, limit)

    def get_recent_phrases(
        self,
        db: "Session",
        days: Optional[int] = None,
        limit: int = 100,
    ) -> List[str]:
        """
        Get distinctive phrases used recently.

        Args:
            db: Database session.
            days: Look-back window in days (default: phrase_cooldown_days).
            limit: Maximum number of records to scan.

        Returns:
            De-duplicated list of recently used phrases, most recent first.
        """
        if days is None:
            days = self.phrase_cooldown_days
        return self._recent_list_values(db, "key_phrases", days, limit)

    def get_recent_hooks(
        self,
        db: "Session",
        days: int = 14,
        limit: int = 50,
    ) -> Dict[str, int]:
        """
        Get hook types used recently, with usage counts.

        Args:
            db: Database session.
            days: Look-back window in days.
            limit: Maximum number of records to scan.

        Returns:
            Mapping of hook_type -> usage count.
        """
        since = datetime.utcnow() - timedelta(days=days)

        memories = (
            db.query(ContentMemory)
            .filter(
                ContentMemory.created_at >= since,
                ContentMemory.hook_type.isnot(None),
            )
            .order_by(desc(ContentMemory.created_at))
            .limit(limit)
            .all()
        )

        hook_counts: Dict[str, int] = {}
        for mem in memories:
            hook_counts[mem.hook_type] = hook_counts.get(mem.hook_type, 0) + 1

        return hook_counts

    def suggest_hook_type(
        self,
        db: "Session",
        preferred_hooks: Optional[List[str]] = None,
    ) -> str:
        """
        Suggest the hook type least used recently.

        Args:
            db: Database session.
            preferred_hooks: Platform-preferred hook list; falls back to the
                engine defaults when omitted.

        Returns:
            Suggested hook type (ties resolved by list order, matching the
            original first-minimum behavior).
        """
        recent_hooks = self.get_recent_hooks(db)

        if not preferred_hooks:
            preferred_hooks = list(self._DEFAULT_HOOKS)

        # min() returns the first minimal element, preserving the original
        # strict-less-than scan semantics.
        return min(preferred_hooks, key=lambda hook: recent_hooks.get(hook, 0))

    def build_exclusion_context(
        self,
        db: "Session",
        content_type: str,
        category: Optional[str] = None,
    ) -> str:
        """
        Build the exclusion context block for the generation prompt.

        Args:
            db: Database session.
            content_type: Content type (tip_tech, product_post, ...).
                Currently unused by the filter — reserved for future scoping.
            category: Specific category (ia, productividad, ...). Also unused.

        Returns:
            Prompt-ready string with exclusion instructions. Always non-empty
            because a hook suggestion is always appended.
        """
        recent_topics = self.get_recent_topics(db)
        recent_phrases = self.get_recent_phrases(db)

        exclusions: List[str] = []

        if recent_topics:
            topics_str = ", ".join(recent_topics[:10])
            exclusions.append(f"TEMAS YA CUBIERTOS RECIENTEMENTE (evitar): {topics_str}")

        if recent_phrases:
            phrases_str = "; ".join(recent_phrases[:5])
            exclusions.append(f"FRASES YA USADAS (no repetir): {phrases_str}")

        # Always steer generation toward the least-used hook.
        suggested_hook = self.suggest_hook_type(db)
        exclusions.append(f"HOOK SUGERIDO: {suggested_hook} (poco usado recientemente)")

        return "\n".join(exclusions)

    # === Best performers ===

    def get_top_performers(
        self,
        db: "Session",
        content_type: Optional[str] = None,
        platform: Optional[str] = None,
        limit: int = 5,
    ) -> List["ContentMemory"]:
        """
        Get the best-performing memorized posts.

        Args:
            db: Database session.
            content_type: Optional content-type filter.
            platform: Optional platform filter.
            limit: Maximum number of results.

        Returns:
            Top-performer ContentMemory rows, highest engagement first;
            among equals, the least-reused examples come first.
        """
        query = db.query(ContentMemory).filter(
            ContentMemory.is_top_performer.is_(True),
            ContentMemory.engagement_score.isnot(None),
        )

        if content_type:
            query = query.filter(ContentMemory.content_type == content_type)

        if platform:
            query = query.filter(ContentMemory.platform == platform)

        # Order by score, then prefer examples that have been reused less.
        return (
            query.order_by(
                desc(ContentMemory.engagement_score),
                ContentMemory.times_used_as_example,
            )
            .limit(limit)
            .all()
        )

    def get_few_shot_examples(
        self,
        db: "Session",
        content_type: str,
        platform: Optional[str] = None,
        min_examples: int = 2,
        max_examples: int = 5,
    ) -> List[str]:
        """
        Get real post contents for few-shot prompting.

        Args:
            db: Database session.
            content_type: Content type.
            platform: Target platform; defaults to "x" content when omitted.
            min_examples: Minimum number of examples (currently not enforced —
                fewer may be returned if not enough top performers exist).
            max_examples: Maximum number of examples.

        Returns:
            Contents of successful posts (at most max_examples).
        """
        top_performers = self.get_top_performers(
            db, content_type, platform, limit=max_examples
        )

        examples: List[str] = []
        for mem in top_performers:
            # NOTE(review): one query per memory row (N+1); acceptable for
            # max_examples <= 5.
            post = db.query(Post).filter(Post.id == mem.post_id).first()
            if post:
                content = post.get_content_for_platform(platform or "x")
                examples.append(content)

                # Record that this post was used as an example, so future
                # selections can de-prioritize it.
                mem.record_example_usage()

        db.commit()

        return examples[:max_examples]

    def build_few_shot_context(
        self,
        db: "Session",
        content_type: str,
        platform: Optional[str] = None,
    ) -> str:
        """
        Build the few-shot examples block for the generation prompt.

        Args:
            db: Database session.
            content_type: Content type.
            platform: Target platform.

        Returns:
            Prompt-ready string with formatted examples, or "" when no
            top performers are available.
        """
        examples = self.get_few_shot_examples(db, content_type, platform)

        if not examples:
            return ""

        formatted = ["EJEMPLOS DE POSTS EXITOSOS (inspírate en el estilo):"]
        for i, ex in enumerate(examples, 1):
            formatted.append(f"\n--- Ejemplo {i} ---\n{ex}")

        return "\n".join(formatted)

    # === Content analysis ===

    def analyze_content(
        self,
        content: str,
        content_type: str,
        platform: str,
    ) -> Dict[str, Any]:
        """
        Analyze generated content for storage in memory.

        This is a basic rule-based analysis. For more sophisticated analysis
        use the AI-powered Validator.

        Args:
            content: Content to analyze.
            content_type: Content type (unused here; kept for interface parity).
            platform: Target platform (unused here; kept for interface parity).

        Returns:
            Dict with keys: hook_type, topics, key_phrases, content_summary.
        """
        # Hook type is inferred from the first line only.
        first_line = content.split("\n")[0].strip()
        hook_type = self._detect_hook_type(first_line)

        topics = self._extract_topics(content)
        key_phrases = self._extract_key_phrases(content)

        return {
            "hook_type": hook_type,
            "topics": topics,
            "key_phrases": key_phrases,
            "content_summary": content[:200],  # simple truncation summary
        }

    def _detect_hook_type(self, first_line: str) -> str:
        """Classify the hook type from the post's first line (rule-based)."""
        first_line_lower = first_line.lower()

        if first_line.endswith("?"):
            return "pregunta_retórica"
        if any(char.isdigit() for char in first_line) and "%" in first_line:
            return "dato_impactante"
        if first_line_lower.startswith(("tip:", "consejo:", "truco:")):
            return "tip_directo"
        if "🧵" in first_line or "hilo" in first_line_lower:
            return "hilo_intro"
        if any(word in first_line_lower for word in ["nunca", "siempre", "error", "mito"]):
            return "afirmación_bold"
        return "general"

    def _extract_topics(self, content: str) -> List[str]:
        """
        Detect coarse topics via whole-word keyword matching.

        Uses \\b word boundaries so short keywords ("ia", "pip") only match
        as standalone words — plain substring matching flagged "ia" inside
        words like "gracias".
        """
        content_lower = content.lower()

        found_topics: List[str] = []
        for topic, keywords in self._TOPIC_KEYWORDS.items():
            if any(
                re.search(rf"\b{re.escape(kw)}\b", content_lower)
                for kw in keywords
            ):
                found_topics.append(topic)

        return found_topics

    def _extract_key_phrases(self, content: str) -> List[str]:
        """
        Extract distinctive phrases ("la regla X", "el método Y", ...).

        Returns at most 5 phrases, de-duplicated deterministically in order
        of first appearance.
        """
        content_lower = content.lower()

        phrases: List[str] = []
        for pattern in self._PHRASE_PATTERNS:
            phrases.extend(pattern.findall(content_lower))

        return list(dict.fromkeys(phrases))[:5]

    # === Persistence ===

    def save_to_memory(
        self,
        db: "Session",
        post_id: int,
        content: str,
        content_type: str,
        platform: str,
        quality_score: Optional[int] = None,
        quality_breakdown: Optional[Dict] = None,
        template_used: Optional[str] = None,
        personality_used: Optional[str] = None,
    ) -> "ContentMemory":
        """
        Save generated content into memory for tracking.

        Args:
            db: Database session.
            post_id: Post ID.
            content: Generated content.
            content_type: Content type.
            platform: Platform.
            quality_score: Quality score.
            quality_breakdown: Score breakdown.
            template_used: Template used.
            personality_used: Personality used.

        Returns:
            The created (and refreshed) ContentMemory row.
        """
        analysis = self.analyze_content(content, content_type, platform)

        memory = ContentMemory(
            post_id=post_id,
            topics=analysis["topics"],
            key_phrases=analysis["key_phrases"],
            hook_type=analysis["hook_type"],
            content_summary=analysis["content_summary"],
            quality_score=quality_score,
            quality_breakdown=quality_breakdown,
            platform=platform,
            content_type=content_type,
            template_used=template_used,
            personality_used=personality_used,
        )

        db.add(memory)
        db.commit()
        db.refresh(memory)

        return memory

    def update_engagement_scores(self, db: "Session") -> int:
        """
        Recompute the top-performer threshold and flag qualifying rows.

        Should run periodically (e.g. a daily Celery task).

        Returns:
            Number of rows newly marked as top performers.
        """
        # Gather every known engagement score to derive the percentile cut-off.
        all_scores = (
            db.query(ContentMemory.engagement_score)
            .filter(ContentMemory.engagement_score.isnot(None))
            .all()
        )

        if not all_scores:
            return 0

        scores = sorted([s[0] for s in all_scores], reverse=True)
        # Index of the last score still inside the top percentile (>= 0).
        threshold_idx = max(0, int(len(scores) * self.top_percentile / 100) - 1)
        threshold_score = scores[threshold_idx]

        # Flag rows at/above the threshold that are not yet marked.
        # NOTE: flags are never cleared here — demotion below the threshold
        # does not un-mark a row (matches original behavior).
        updated = (
            db.query(ContentMemory)
            .filter(
                ContentMemory.engagement_score >= threshold_score,
                ContentMemory.is_top_performer.is_(False),
            )
            .update({"is_top_performer": True})
        )

        db.commit()

        return updated
|
|
|
|
|
|
# Module-level singleton with the default configuration; import and share
# this instance instead of constructing a new engine per request.
context_engine = ContextEngine()
|