Major improvements to AI content generation.

## New Components (app/services/ai/)
- PromptLibrary: YAML-based prompt templates with inheritance
- ContextEngine: Anti-repetition and best-performers tracking
- ContentGeneratorV2: Enhanced generation with dynamic parameters
- PlatformAdapter: Platform-specific content adaptation
- ContentValidator: AI-powered quality scoring (0-100)

## Prompt Library (app/prompts/)
- 3 personalities: default, educational, promotional
- 5 templates: tip_tech, product_post, service_post, thread, response
- 4 platform configs: x, threads, instagram, facebook
- Few-shot examples by category: ia, productividad, seguridad

## Database Changes
- New table: content_memory (tracks generated content)
- New columns in posts: quality_score, score_breakdown, generation_attempts

## New API Endpoints (/api/v2/generate/)
- POST /generate - Generation with quality check
- POST /generate/batch - Batch generation
- POST /quality/evaluate - Evaluate content quality
- GET /templates, /personalities, /platforms - List configs

## Celery Tasks
- update_engagement_scores (every 6h)
- cleanup_old_memory (monthly)
- refresh_best_posts_yaml (weekly)

## Tests
- Comprehensive tests for all AI engine components

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
618 lines
19 KiB
Python
618 lines
19 KiB
Python
"""
|
|
API endpoints v2 para generación de contenido con IA.
|
|
|
|
Nuevas funcionalidades:
|
|
- Quality scoring
|
|
- Regeneración automática
|
|
- Context-aware generation
|
|
- Métricas de generación
|
|
"""
|
|
|
|
from typing import Optional, List, Dict, Any
|
|
from fastapi import APIRouter, HTTPException, Depends
|
|
from pydantic import BaseModel, Field
|
|
from sqlalchemy.orm import Session
|
|
|
|
from app.core.database import get_db
|
|
from app.core.config import settings
|
|
|
|
|
|
# Shared router for all v2 generation endpoints. Prefix/mounting is done by
# the application (presumably under /api/v2/generate per the changelog —
# confirm in the app setup).
router = APIRouter()
|
|
|
|
|
|
# ============================================================
|
|
# Schemas
|
|
# ============================================================
|
|
|
|
class GenerateV2Request(BaseModel):
    """Generic v2 generation request.

    Carries the template name, its variables, and the feature toggles
    (anti-repetition context, few-shot examples, quality validation)
    for a single generation.
    """
    template: str = Field(..., description="Nombre del template (tip_tech, product_post, etc.)")
    variables: Dict[str, Any] = Field(default_factory=dict, description="Variables para el template")
    platform: str = Field(default="x", description="Plataforma destino")
    personality: Optional[str] = Field(None, description="Override de personalidad")
    use_context: bool = Field(default=True, description="Usar anti-repetición")
    use_few_shot: bool = Field(default=True, description="Usar ejemplos de posts exitosos")
    validate_quality: bool = Field(default=True, description="Validar y scorear calidad")
    # Bounded to 1-5 so a bad prompt cannot trigger unbounded regeneration cost.
    max_attempts: int = Field(default=2, ge=1, le=5, description="Máximo intentos de regeneración")
|
|
|
|
|
|
class GenerateV2Response(BaseModel):
    """Result of a single v2 generation.

    On failure only ``success`` and ``error`` are meaningful; on success
    ``content`` holds the generated text and ``adapted_content`` the
    platform-adapted version (when produced).
    """
    success: bool
    content: Optional[str] = None
    adapted_content: Optional[str] = None
    quality_score: Optional[int] = None  # only set when quality validation ran
    score_breakdown: Optional[Dict[str, int]] = None  # per-criterion scores
    is_top_quality: bool = False
    attempts: int = 1  # number of generation attempts that were needed
    metadata: Optional[Dict[str, Any]] = None
    error: Optional[str] = None  # error message when success is False
|
|
|
|
|
|
class BatchGenerateV2Request(BaseModel):
    """Batch v2 generation request.

    One generation is produced per (variables, platform) pair, so the
    total output size is ``len(variables_list) * len(platforms)``.
    """
    template: str
    variables_list: List[Dict[str, Any]]
    platforms: List[str] = ["x"]
    validate_quality: bool = True
|
|
|
|
|
|
class BatchGenerateV2Response(BaseModel):
    """Batch v2 generation response."""
    success: bool  # True only when every item in the batch succeeded
    total_requested: int
    total_generated: int  # count of successful items
    results: List[GenerateV2Response]
    average_quality: Optional[float] = None  # mean score over scored items, if any
|
|
|
|
|
|
class AdaptContentV2Request(BaseModel):
    """v2 content-adaptation request."""
    content: str
    source_platform: str = "instagram"
    target_platforms: List[str]
    use_ai: bool = True  # Whether to use AI for a more precise adaptation
|
|
|
|
|
|
class ContextInfoRequest(BaseModel):
    """Request for generation-context information."""
    content_type: str
    category: Optional[str] = None  # optional sub-category filter
|
|
|
|
|
|
class QualityEvaluateRequest(BaseModel):
    """Request to evaluate the quality of existing content."""
    content: str
    platform: str = "x"  # target platform passed through to the validator
|
|
|
|
|
|
# ============================================================
|
|
# Helpers
|
|
# ============================================================
|
|
|
|
def check_api_configured():
    """Raise HTTP 503 unless a DeepSeek API key is present in settings."""
    if settings.DEEPSEEK_API_KEY:
        return
    raise HTTPException(
        status_code=503,
        detail="DeepSeek API no configurada. Agrega DEEPSEEK_API_KEY en .env"
    )
|
|
|
|
|
|
def get_content_generator():
    """Return the shared content generator once the v2 engine is confirmed.

    Raises HTTPException(503) when the API key is missing or the v2
    engine failed to initialize.
    """
    check_api_configured()
    from app.services.content_generator import content_generator

    if content_generator._use_new_engine:
        return content_generator

    raise HTTPException(
        status_code=503,
        detail="Motor v2 no disponible. Verifica app.services.ai"
    )
|
|
|
|
|
|
# ============================================================
|
|
# Endpoints de Generación
|
|
# ============================================================
|
|
|
|
@router.post("/generate", response_model=GenerateV2Response)
async def generate_content_v2(
    request: GenerateV2Request,
    db: Session = Depends(get_db)
):
    """
    Generate content with the v2 engine.

    Features:
    - **template**: tip_tech, product_post, service_post, thread, response
    - **variables**: depend on the template (see /templates for details)
    - **validate_quality**: enables AI quality scoring
    - **use_context**: avoids repeating recent topics
    - **use_few_shot**: uses successful posts as examples
    """
    generator = get_content_generator()

    try:
        if not request.validate_quality:
            # Plain generation path: no scoring, a single attempt.
            outcome = await generator._v2.generate(
                template_name=request.template,
                variables=request.variables,
                platform=request.platform,
                db=db if request.use_context else None,
                use_context=request.use_context,
                use_few_shot=request.use_few_shot,
                personality=request.personality
            )
            return GenerateV2Response(
                success=True,
                content=outcome["content"],
                adapted_content=outcome["adapted_content"],
                metadata=outcome["metadata"]
            )

        # Quality-checked path: may regenerate up to max_attempts times.
        outcome = await generator.generate_with_quality_check(
            template_name=request.template,
            variables=request.variables,
            platform=request.platform,
            db=db,
            max_attempts=request.max_attempts
        )
        return GenerateV2Response(
            success=True,
            content=outcome["content"],
            adapted_content=outcome["content"],  # already platform-adapted
            quality_score=outcome.get("quality_score"),
            score_breakdown=outcome.get("score_breakdown"),
            is_top_quality=outcome.get("is_top_performer", False),
            attempts=outcome.get("attempts", 1),
            metadata=outcome.get("metadata")
        )

    except Exception as e:
        # Errors are reported in-band so callers always get the same shape.
        return GenerateV2Response(success=False, error=str(e))
|
|
|
|
|
|
@router.post("/generate/batch", response_model=BatchGenerateV2Response)
async def generate_batch_v2(
    request: BatchGenerateV2Request,
    db: Session = Depends(get_db)
):
    """
    Generate multiple pieces of content in one call.

    One item is generated per (variables, platform) pair. Useful for:
    - Generating tips for a whole week
    - Creating variations on the same topic
    - Preparing content for several platforms

    Per-item failures are reported in-band (item.success == False) so a
    single bad input does not abort the rest of the batch.
    """
    generator = get_content_generator()

    results = []
    total_score = 0
    scored_count = 0

    for variables in request.variables_list:
        for platform in request.platforms:
            try:
                if request.validate_quality:
                    result = await generator.generate_with_quality_check(
                        template_name=request.template,
                        variables=variables,
                        platform=platform,
                        db=db,
                        max_attempts=2
                    )

                    score = result.get("quality_score")
                    # BUGFIX: explicit None check so a legitimate score of 0
                    # is still counted in the average (plain `if score:`
                    # silently dropped zero scores).
                    if score is not None:
                        total_score += score
                        scored_count += 1

                    results.append(GenerateV2Response(
                        success=True,
                        content=result["content"],
                        quality_score=score,
                        is_top_quality=result.get("is_top_performer", False),
                        attempts=result.get("attempts", 1)
                    ))
                else:
                    result = await generator._v2.generate(
                        template_name=request.template,
                        variables=variables,
                        platform=platform,
                        db=db
                    )

                    results.append(GenerateV2Response(
                        success=True,
                        content=result["content"],
                        adapted_content=result["adapted_content"]
                    ))

            except Exception as e:
                # Record the failure and keep processing the remaining items.
                results.append(GenerateV2Response(
                    success=False,
                    error=str(e)
                ))

    return BatchGenerateV2Response(
        success=all(r.success for r in results),
        total_requested=len(request.variables_list) * len(request.platforms),
        total_generated=sum(1 for r in results if r.success),
        results=results,
        average_quality=total_score / scored_count if scored_count > 0 else None
    )
|
|
|
|
|
|
@router.post("/generate/multiplatform")
async def generate_for_all_platforms(
    template: str,
    variables: Dict[str, Any],
    platforms: List[str] = ["x", "threads", "instagram", "facebook"],
    db: Session = Depends(get_db)
):
    """
    Generate content optimized for several platforms at once.

    A base version is generated and then intelligently adapted to each
    requested platform using AI.
    """
    generator = get_content_generator()

    try:
        per_platform = await generator._v2.generate_for_all_platforms(
            template_name=template,
            variables=variables,
            platforms=platforms,
            db=db
        )

        # Flatten each platform result into the public response shape.
        payload = {}
        for name, outcome in per_platform.items():
            payload[name] = {
                "content": outcome["content"],
                "adapted_content": outcome["adapted_content"],
                "metadata": outcome.get("metadata", {}),
            }

        return {"success": True, "platforms": payload}

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
|
|
# ============================================================
|
|
# Endpoints de Adaptación
|
|
# ============================================================
|
|
|
|
@router.post("/adapt")
async def adapt_content_v2(request: AdaptContentV2Request):
    """
    Adapt content to multiple platforms.

    With use_ai=True the adaptation is done by the AI (more precise);
    with use_ai=False heuristic rules are applied (faster).
    """
    generator = get_content_generator()

    async def _adapt_one(target: str) -> str:
        # Adapt for a single target platform; lets exceptions propagate.
        if request.use_ai:
            return await generator._v2.adapt_content(
                content=request.content,
                source_platform=request.source_platform,
                target_platform=target
            )
        from app.services.ai import platform_adapter
        return platform_adapter.adapt(request.content, target).content

    results = {}
    for target in request.target_platforms:
        try:
            results[target] = {
                "content": await _adapt_one(target),
                "success": True
            }
        except Exception as e:
            # Best-effort: a failing target does not stop the others.
            results[target] = {
                "content": None,
                "success": False,
                "error": str(e)
            }

    return {
        "original": request.content,
        "source_platform": request.source_platform,
        "adaptations": results
    }
|
|
|
|
|
|
# ============================================================
|
|
# Endpoints de Calidad
|
|
# ============================================================
|
|
|
|
@router.post("/quality/evaluate")
async def evaluate_content_quality(request: QualityEvaluateRequest):
    """
    Evaluate the quality of existing content.

    Returns a 0-100 score with a detailed breakdown.
    """
    check_api_configured()

    from app.services.ai import content_validator

    try:
        result = await content_validator.evaluate(
            content=request.content,
            platform=request.platform
        )

        scoring = result.scoring
        scoring_payload = {
            "total_score": scoring.total_score if scoring else None,
            "breakdown": scoring.breakdown if scoring else None,
            "feedback": scoring.feedback if scoring else None,
            "is_top_quality": scoring.is_top_performer if scoring else False,
        }

        return {
            "success": True,
            "validation": {
                "passed": result.validation.passed,
                "issues": result.validation.issues
            },
            "scoring": scoring_payload,
            "decision": result.final_decision
        }

    except Exception as e:
        return {"success": False, "error": str(e)}
|
|
|
|
|
|
@router.post("/quality/validate")
async def validate_content(
    content: str,
    platform: str = "x"
):
    """
    Validate content without scoring (faster, no token cost).

    Checks:
    - Length within platform limits
    - No forbidden content
    - Valid format
    """
    from app.services.ai import content_validator

    outcome = content_validator.validate(content, platform)
    return {"valid": outcome.passed, "issues": outcome.issues}
|
|
|
|
|
|
# ============================================================
|
|
# Endpoints de Contexto
|
|
# ============================================================
|
|
|
|
@router.post("/context/info")
async def get_context_info(
    request: ContextInfoRequest,
    db: Session = Depends(get_db)
):
    """
    Return the context information used for generation.

    Includes:
    - Recently used topics
    - Phrases to avoid
    - Suggested hook
    - Top-performer examples
    """
    from app.services.ai import context_engine

    topics = context_engine.get_recent_topics(db)
    phrases = context_engine.get_recent_phrases(db)
    hook_usage = context_engine.get_recent_hooks(db)
    suggested = context_engine.suggest_hook_type(db)

    performers = context_engine.get_top_performers(
        db,
        content_type=request.content_type,
        limit=3
    )

    # Summarize each top performer into a plain dict for the response.
    performer_summaries = []
    for item in performers:
        performer_summaries.append({
            "post_id": item.post_id,
            "engagement_score": item.engagement_score,
            "hook_type": item.hook_type,
            "topics": item.topics
        })

    return {
        "recent_topics": topics[:10],
        "recent_phrases": phrases[:5],
        "hook_usage": hook_usage,
        "suggested_hook": suggested,
        "top_performers": performer_summaries
    }
|
|
|
|
|
|
@router.get("/context/exclusions")
async def get_exclusion_context(
    content_type: str,
    category: Optional[str] = None,
    db: Session = Depends(get_db)
):
    """
    Return the formatted exclusion context.

    Useful to preview what will be excluded in the next generation.
    """
    from app.services.ai import context_engine

    return {
        "content_type": content_type,
        "category": category,
        "exclusion_context": context_engine.build_exclusion_context(
            db, content_type, category
        ),
    }
|
|
|
|
|
|
# ============================================================
|
|
# Endpoints de Templates y Configuración
|
|
# ============================================================
|
|
|
|
@router.get("/templates")
async def list_templates():
    """
    List every available template together with its variables.
    """
    from app.services.ai import prompt_library

    catalog = []
    for name in prompt_library.list_templates():
        try:
            spec = prompt_library.get_template(name)

            variable_specs = []
            for var in spec.get("variables", []):
                variable_specs.append({
                    "name": var["name"],
                    "type": var.get("type", "string"),
                    "required": var.get("required", False),
                    "default": var.get("default"),
                    "options": var.get("options")
                })

            catalog.append({
                "name": name,
                "description": spec.get("description", ""),
                "personality": spec.get("personality", "default"),
                "variables": variable_specs,
                "parameters": spec.get("parameters", {})
            })
        except Exception:
            # Best-effort listing: skip templates that fail to load.
            continue

    return {"templates": catalog}
|
|
|
|
|
|
@router.get("/personalities")
async def list_personalities():
    """
    List the available personalities.
    """
    from app.services.ai import prompt_library

    catalog = []
    for name in prompt_library.list_personalities():
        try:
            spec = prompt_library.get_personality(name)
            catalog.append({
                "name": name,
                "description": spec.get("description", ""),
                "voice": spec.get("voice", {}),
                "extends": spec.get("extends")
            })
        except Exception:
            # Best-effort listing: skip personalities that fail to load.
            continue

    return {"personalities": catalog}
|
|
|
|
|
|
@router.get("/platforms")
async def list_platforms():
    """
    List the configured platforms with their limits.
    """
    from app.services.ai import prompt_library

    catalog = []
    for name in prompt_library.list_platforms():
        try:
            spec = prompt_library.get_platform_config(name)
            catalog.append({
                "name": name,
                "display_name": spec.get("display_name", name),
                "limits": spec.get("limits", {}),
                "tone": spec.get("tone", {})
            })
        except Exception:
            # Best-effort listing: skip platforms whose config fails to load.
            continue

    return {"platforms": catalog}
|
|
|
|
|
|
@router.get("/status")
async def get_v2_status():
    """
    Report the health of the v2 engine and its components.

    Probes each component exported by ``app.services.ai`` and returns a
    per-component status map, plus whether the v2 generator itself is
    importable (``v2_engine``). Never raises: each probe failure is
    captured in the corresponding component entry.
    """
    import importlib

    def _component_status(attr_name: str) -> dict:
        # Probe one symbol exported by app.services.ai; never raises.
        # Replaces four byte-identical try/except import blocks.
        try:
            module = importlib.import_module("app.services.ai")
            getattr(module, attr_name)
            return {"status": "ok"}
        except Exception as e:
            return {"status": "error", "error": str(e)}

    result = {
        "api_configured": bool(settings.DEEPSEEK_API_KEY),
        "provider": "DeepSeek",
        "model": "deepseek-chat",
        "v2_engine": False,
        "components": {}
    }

    # The prompt library is instantiated so catalog sizes can be reported too.
    try:
        from app.services.ai import PromptLibrary
        lib = PromptLibrary()
        result["components"]["prompt_library"] = {
            "status": "ok",
            "templates": len(lib.list_templates()),
            "personalities": len(lib.list_personalities()),
            "platforms": len(lib.list_platforms())
        }
    except Exception as e:
        result["components"]["prompt_library"] = {"status": "error", "error": str(e)}

    result["components"]["context_engine"] = _component_status("ContextEngine")
    result["components"]["platform_adapter"] = _component_status("PlatformAdapter")
    result["components"]["validator"] = _component_status("ContentValidator")

    generator_status = _component_status("ContentGeneratorV2")
    result["components"]["generator_v2"] = generator_status
    result["v2_engine"] = generator_status["status"] == "ok"

    return result
|