feat: Add Content Generation Engine v2 with quality scoring

Major improvements to AI content generation:

## New Components (app/services/ai/)
- PromptLibrary: YAML-based prompt templates with inheritance
- ContextEngine: Anti-repetition and best performers tracking
- ContentGeneratorV2: Enhanced generation with dynamic parameters
- PlatformAdapter: Platform-specific content adaptation
- ContentValidator: AI-powered quality scoring (0-100)
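
Together these form a generate → adapt → validate pipeline. A minimal sketch of the wiring (illustrative only; the signatures shown are the ones exercised by the new tests, everything else is an assumption):

```python
# Illustrative wiring of the v2 pipeline; signatures mirror tests/test_ai_engine.py.
from app.services.ai.generator import ContentGeneratorV2
from app.services.ai.platform_adapter import PlatformAdapter
from app.services.ai.validator import ContentValidator

async def generate_post_for_x() -> str:
    generator = ContentGeneratorV2()
    result = await generator.generate(
        template_name="tip_tech",
        variables={"category": "productividad"},
        platform="x",
    )
    # generate() already returns platform-adapted content; adapting and
    # validating explicitly here just makes each stage visible.
    adapted = PlatformAdapter().adapt(result["content"], "x")
    quality = ContentValidator().validate(adapted.content, "x")
    assert quality.passed
    return adapted.content
```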

## Prompt Library (app/prompts/)
- 3 personalities: default, educational, promotional
- 5 templates: tip_tech, product_post, service_post, thread, response
- 4 platform configs: x, threads, instagram, facebook
- Few-shot examples by category: ia, productividad, seguridad
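
The library API, as exercised by the new tests (a usage sketch, not exhaustive):

```python
# Usage sketch of PromptLibrary; calls and return shapes are taken from the tests.
from app.services.ai.prompt_library import PromptLibrary

lib = PromptLibrary()
lib.list_templates()                # includes "tip_tech", "product_post", ...
lib.get_personality("educational")  # inherits system_prompt from "default"
rendered = lib.render_template("tip_tech", {"category": "ia"})
# rendered carries "system_prompt", "user_prompt" and "parameters"
lib.get_platform_limits("x")        # {"max_characters": 280, ...}
```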

## Database Changes
- New table: content_memory (tracks generated content)
- New columns in posts: quality_score, score_breakdown, generation_attempts
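
For orientation, a hedged SQLAlchemy sketch of the change; only the table and column names come from this commit, every type is an assumption:

```python
# Hypothetical model sketch -- column types are assumptions.
from sqlalchemy import Column, DateTime, Integer, Text, func
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class ContentMemory(Base):
    __tablename__ = "content_memory"
    id = Column(Integer, primary_key=True)
    content = Column(Text, nullable=False)  # the generated text
    created_at = Column(DateTime, server_default=func.now())

# New columns on posts (names from this commit, types assumed):
#   quality_score        Float    -- validator score, 0-100
#   score_breakdown      JSON     -- per-criterion scores
#   generation_attempts  Integer  -- how many regenerations were needed
```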

## New API Endpoints (/api/v2/generate/)
- POST /generate - Generation with quality check
- POST /generate/batch - Batch generation
- POST /quality/evaluate - Evaluate content quality
- GET /templates, /personalities, /platforms - List configs
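
A hedged example of calling the main endpoint; the request fields mirror ContentGeneratorV2.generate() as seen in the tests, while the host, exact mount path, and request schema are assumptions:

```python
# Sketch: calling POST /generate; field names mirror generator.generate(),
# the base URL and exact route are assumptions.
import httpx

resp = httpx.post(
    "http://localhost:8000/api/v2/generate/generate",
    json={
        "template_name": "tip_tech",
        "variables": {"category": "productividad"},
        "platform": "x",
    },
)
resp.raise_for_status()
data = resp.json()  # expected to include content, adapted_content, metadata
```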

## Celery Tasks
- update_engagement_scores (every 6h)
- cleanup_old_memory (monthly)
- refresh_best_posts_yaml (weekly)
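
In Celery beat terms the schedule would look roughly like this (a sketch; the task module paths and the concrete crontab choices for "monthly"/"weekly" are assumptions, only the task names come from this commit):

```python
# Hedged beat-schedule sketch; only the task names come from this commit.
from celery.schedules import crontab

beat_schedule = {
    "update-engagement-scores": {
        "task": "app.tasks.update_engagement_scores",  # assumed module path
        "schedule": crontab(minute=0, hour="*/6"),     # every 6 hours
    },
    "cleanup-old-memory": {
        "task": "app.tasks.cleanup_old_memory",
        "schedule": crontab(minute=0, hour=3, day_of_month="1"),  # monthly
    },
    "refresh-best-posts-yaml": {
        "task": "app.tasks.refresh_best_posts_yaml",
        "schedule": crontab(minute=0, hour=4, day_of_week="mon"),  # weekly
    },
}
```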

## Tests
- Comprehensive tests for all AI engine components

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
commit 11b0ba46fa (parent f458f809ca)
Date: 2026-01-28 20:55:28 +00:00
36 changed files with 6266 additions and 55 deletions

tests/test_ai_engine.py (new file, 447 lines)

@@ -0,0 +1,447 @@
"""
Tests para el Content Generation Engine v2.
Tests para:
- PromptLibrary: Carga de YAMLs, renderizado
- ContextEngine: Anti-repetición, best performers
- PlatformAdapter: Adaptación por plataforma
- ContentValidator: Validación y scoring
"""
import pytest
from pathlib import Path
from unittest.mock import MagicMock, patch, AsyncMock
import json
# ============================================================
# PromptLibrary Tests
# ============================================================
class TestPromptLibrary:
    """Tests for PromptLibrary."""

    @pytest.fixture
    def prompt_lib(self):
        """Create a PromptLibrary instance."""
        from app.services.ai.prompt_library import PromptLibrary
        return PromptLibrary()

    def test_list_templates(self, prompt_lib):
        """List the available templates."""
        templates = prompt_lib.list_templates()
        assert isinstance(templates, list)
        assert "tip_tech" in templates
        assert "product_post" in templates
        assert "service_post" in templates
        assert "thread" in templates

    def test_list_personalities(self, prompt_lib):
        """List the available personalities."""
        personalities = prompt_lib.list_personalities()
        assert isinstance(personalities, list)
        assert "default" in personalities
        assert "educational" in personalities
        assert "promotional" in personalities

    def test_list_platforms(self, prompt_lib):
        """List the configured platforms."""
        platforms = prompt_lib.list_platforms()
        assert isinstance(platforms, list)
        assert "x" in platforms
        assert "threads" in platforms
        assert "instagram" in platforms
        assert "facebook" in platforms

    def test_get_personality_default(self, prompt_lib):
        """Load the default personality."""
        personality = prompt_lib.get_personality("default")
        assert "name" in personality
        assert personality["name"] == "default"
        assert "voice" in personality
        assert "system_prompt" in personality

    def test_get_personality_with_inheritance(self, prompt_lib):
        """Personalities inherit correctly from their base."""
        educational = prompt_lib.get_personality("educational")
        # Must carry properties inherited from the base (default)
        assert "system_prompt" in educational
        # Must also carry its own properties
        assert "teaching_techniques" in educational

    def test_get_template(self, prompt_lib):
        """Load a template."""
        template = prompt_lib.get_template("tip_tech")
        assert "name" in template
        assert template["name"] == "tip_tech"
        assert "template" in template
        assert "variables" in template
        assert "parameters" in template

    def test_render_template_basic(self, prompt_lib):
        """Basic template rendering."""
        with patch('app.services.ai.prompt_library.settings') as mock_settings:
            mock_settings.BUSINESS_NAME = "Test Corp"
            mock_settings.BUSINESS_LOCATION = "Test City"
            mock_settings.BUSINESS_WEBSITE = "test.com"
            mock_settings.CONTENT_TONE = "Professional"
            rendered = prompt_lib.render_template(
                "tip_tech",
                {"category": "productividad"}
            )
            assert "system_prompt" in rendered
            assert "user_prompt" in rendered
            assert "parameters" in rendered
            assert "productividad" in rendered["user_prompt"]

    def test_render_template_with_defaults(self, prompt_lib):
        """Variables with defaults are honored."""
        with patch('app.services.ai.prompt_library.settings') as mock_settings:
            mock_settings.BUSINESS_NAME = "Test Corp"
            mock_settings.BUSINESS_LOCATION = "Test City"
            mock_settings.BUSINESS_WEBSITE = "test.com"
            mock_settings.CONTENT_TONE = "Professional"
            # Only category is required; difficulty_level has a default
            rendered = prompt_lib.render_template(
                "tip_tech",
                {"category": "ia"}
            )
            # Should render without error, using the default
            assert "user_prompt" in rendered

    def test_get_platform_config(self, prompt_lib):
        """Load a platform configuration."""
        config = prompt_lib.get_platform_config("x")
        assert "platform" in config
        assert config["platform"] == "x"
        assert "limits" in config
        assert "max_characters" in config["limits"]
        assert config["limits"]["max_characters"] == 280

    def test_get_platform_limits(self, prompt_lib):
        """Fetch platform limits."""
        limits = prompt_lib.get_platform_limits("instagram")
        assert "max_characters" in limits
        assert limits["max_characters"] == 2200
        assert "max_hashtags" in limits

    def test_cache_works(self, prompt_lib):
        """The cache avoids repeated disk reads."""
        # First load
        prompt_lib.get_template("tip_tech")
        # Should now be cached
        assert any("tip_tech" in key for key in prompt_lib._cache.keys())
        # Second load hits the cache (must not fail)
        template = prompt_lib.get_template("tip_tech")
        assert template["name"] == "tip_tech"

    def test_clear_cache(self, prompt_lib):
        """Clearing the cache empties it."""
        prompt_lib.get_template("tip_tech")
        assert len(prompt_lib._cache) > 0
        prompt_lib.clear_cache()
        assert len(prompt_lib._cache) == 0

# ============================================================
# PlatformAdapter Tests
# ============================================================
class TestPlatformAdapter:
    """Tests for PlatformAdapter."""

    @pytest.fixture
    def adapter(self):
        """Create a PlatformAdapter instance."""
        from app.services.ai.platform_adapter import PlatformAdapter
        return PlatformAdapter()

    def test_get_limits(self, adapter):
        """Fetch platform limits."""
        limits = adapter.get_limits("x")
        assert limits["max_characters"] == 280
        assert limits["max_hashtags"] == 2

    def test_adapt_short_content(self, adapter):
        """Short content passes through unchanged."""
        content = "Tip corto #Tech"
        result = adapter.adapt(content, "x")
        assert result.content == content.strip()
        assert not result.truncated
        assert result.platform == "x"

    def test_adapt_long_content_truncates(self, adapter):
        """Long content gets truncated."""
        # ~490 characters of content, well over X's 280 limit
        content = "Este es un contenido muy largo. " * 15 + "#Tech #AI"
        result = adapter.adapt(content, "x")
        assert len(result.content) <= 280
        assert result.truncated
        assert "Contenido truncado" in str(result.changes_made)

    def test_adapt_reduces_hashtags(self, adapter):
        """Hashtags are reduced to the platform limit."""
        content = "Tip importante #Tech #AI #Python #Tips #Code #Dev"
        result = adapter.adapt(content, "x")
        # X only allows 2 hashtags
        hashtag_count = result.content.count("#")
        assert hashtag_count <= 2
        assert result.hashtags_adjusted

    def test_validate_for_platform_valid(self, adapter):
        """Valid content passes platform validation."""
        content = "Tip corto #Tech"
        validation = adapter.validate_for_platform(content, "x")
        assert validation["valid"]
        assert len(validation["issues"]) == 0

    def test_validate_for_platform_too_long(self, adapter):
        """Over-length content fails platform validation."""
        content = "X" * 300  # Exceeds X's 280-character limit
        validation = adapter.validate_for_platform(content, "x")
        assert not validation["valid"]
        assert any(i["type"] == "length" for i in validation["issues"])

    def test_adapt_for_all_platforms(self, adapter):
        """Adaptation works across multiple platforms."""
        content = "Contenido de prueba con mucho texto. " * 5 + "#Tech"
        results = adapter.adapt_for_all_platforms(
            content,
            ["x", "threads", "instagram"]
        )
        assert "x" in results
        assert "threads" in results
        assert "instagram" in results
        # The X version should be no longer than the Instagram one
        assert len(results["x"].content) <= len(results["instagram"].content)

# ============================================================
# ContextEngine Tests
# ============================================================
class TestContextEngine:
    """Tests for ContextEngine."""

    @pytest.fixture
    def context_engine(self):
        """Create a ContextEngine instance."""
        from app.services.ai.context_engine import ContextEngine
        return ContextEngine()

    def test_analyze_content_detects_hook(self, context_engine):
        """Hook type detection."""
        # Rhetorical question
        content1 = "¿Sabías que el 90% de developers usan IA? #Tech"
        analysis1 = context_engine.analyze_content(content1, "tip_tech", "x")
        assert analysis1["hook_type"] == "pregunta_retórica"
        # Striking statistic
        content2 = "El 73% de empresas ya usan IA. #Tech"
        analysis2 = context_engine.analyze_content(content2, "tip_tech", "x")
        assert analysis2["hook_type"] == "dato_impactante"
        # Direct tip
        content3 = "Tip: usa ChatGPT para debugging. #Tech"
        analysis3 = context_engine.analyze_content(content3, "tip_tech", "x")
        assert analysis3["hook_type"] == "tip_directo"

    def test_analyze_content_extracts_topics(self, context_engine):
        """Topic extraction."""
        content = "ChatGPT y Claude son herramientas de IA geniales para productividad. #Tech"
        analysis = context_engine.analyze_content(content, "tip_tech", "x")
        assert "ia" in analysis["topics"]
        assert "productividad" in analysis["topics"]

    def test_suggest_hook_type_varies(self, context_engine):
        """Suggested hook types are drawn from the preferred set."""
        # With an empty DB, it should return the first preferred hook
        mock_db = MagicMock()
        mock_db.query.return_value.filter.return_value.order_by.return_value.limit.return_value.all.return_value = []
        suggested = context_engine.suggest_hook_type(mock_db)
        # Should be one of the preferred hook types
        assert suggested in [
            "pregunta_retórica", "dato_impactante", "tip_directo",
            "afirmación_bold", "historia_corta"
        ]

# ============================================================
# ContentValidator Tests
# ============================================================
class TestContentValidator:
    """Tests for ContentValidator."""

    @pytest.fixture
    def validator(self):
        """Create a ContentValidator instance."""
        from app.services.ai.validator import ContentValidator
        return ContentValidator()

    def test_validate_length_passes(self, validator):
        """Content within the length limit passes."""
        content = "Contenido corto #Tech"
        result = validator.validate(content, "x")
        assert result.passed
        assert not any(i["type"] == "length" for i in result.issues)

    def test_validate_length_fails(self, validator):
        """Over-length content fails."""
        content = "X" * 300  # Exceeds X's 280-character limit
        result = validator.validate(content, "x")
        assert not result.passed
        assert any(i["type"] == "length" and i["severity"] == "error" for i in result.issues)

    def test_validate_prohibited_content(self, validator):
        """Prohibited content is flagged."""
        # Temporarily modify the config for this test
        validator.config["validations"]["prohibited_content"]["prohibited_words"] = ["test_banned"]
        content = "Este contenido tiene test_banned palabra"
        result = validator.validate(content, "x")
        assert not result.passed
        assert any(i["type"] == "prohibited_content" for i in result.issues)

    def test_validate_empty_content_fails(self, validator):
        """Empty content fails validation."""
        content = " "
        result = validator.validate(content, "x")
        assert not result.passed
        assert any(i["type"] == "empty_content" for i in result.issues)

    def test_should_regenerate_respects_max_attempts(self, validator):
        """Regeneration respects the maximum attempt count."""
        from app.services.ai.validator import ContentQualityResult, ValidationResult, ScoringResult
        mock_quality = ContentQualityResult(
            validation=ValidationResult(passed=True, issues=[], content="test"),
            scoring=ScoringResult(
                total_score=55,
                breakdown={},
                feedback="Needs improvement",
                is_top_performer=False,
                action="regenerate"
            ),
            final_decision="regenerate",
            content="test"
        )
        # First attempt: should regenerate
        assert validator.should_regenerate(mock_quality, attempt=1)
        # Second attempt: should regenerate
        assert validator.should_regenerate(mock_quality, attempt=2)
        # Third attempt: should NOT regenerate (max_attempts=2)
        assert not validator.should_regenerate(mock_quality, attempt=3)

# ============================================================
# Integration Tests
# ============================================================
class TestAIEngineIntegration:
    """Integration tests for the full engine."""

    @pytest.mark.asyncio
    async def test_full_generation_flow_mocked(self, mock_openai_client):
        """Full generation flow with a mocked client."""
        with patch('app.services.ai.generator.settings') as mock_settings:
            mock_settings.DEEPSEEK_API_KEY = "test-key"
            mock_settings.DEEPSEEK_BASE_URL = "https://api.deepseek.com"
            mock_settings.BUSINESS_NAME = "Test Corp"
            mock_settings.BUSINESS_LOCATION = "Test City"
            mock_settings.BUSINESS_WEBSITE = "test.com"
            mock_settings.CONTENT_TONE = "Professional"
            from app.services.ai.generator import ContentGeneratorV2
            generator = ContentGeneratorV2()
            generator._client = mock_openai_client
            result = await generator.generate(
                template_name="tip_tech",
                variables={"category": "productividad"},
                platform="x",
                use_context=False,
                use_few_shot=False
            )
            assert "content" in result
            assert "adapted_content" in result
            assert "metadata" in result
            assert result["metadata"]["template"] == "tip_tech"
            assert result["metadata"]["platform"] == "x"

    def test_prompt_library_to_adapter_flow(self):
        """PromptLibrary and PlatformAdapter agree on platform limits."""
        from app.services.ai.prompt_library import PromptLibrary
        from app.services.ai.platform_adapter import PlatformAdapter
        lib = PromptLibrary()
        adapter = PlatformAdapter()
        # Get platform limits from the library
        limits_from_lib = lib.get_platform_limits("x")
        # Get limits from the adapter
        limits_from_adapter = adapter.get_limits("x")
        # They should be consistent
        assert limits_from_lib["max_characters"] == limits_from_adapter["max_characters"]

# ============================================================
# Additional fixtures
# ============================================================
@pytest.fixture
def mock_openai_client():
    """Mock OpenAI client for DeepSeek API tests."""
    mock_client = MagicMock()
    mock_response = MagicMock()
    mock_response.choices = [MagicMock()]
    mock_response.choices[0].message.content = "Generated test content #TechTip #AI"
    mock_response.usage = MagicMock()
    mock_response.usage.total_tokens = 100
    # The generator awaits the client, so create() must be awaitable
    mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
    return mock_client