- Detect threads by template name and skip platform truncation - Parse thread content into individual posts with numbering - Add thread_posts array to API response with post details - Evaluate quality on first post (hook) for threads - Add is_thread and thread_posts fields to GenerateV2Response Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
740 lines
22 KiB
Python
740 lines
22 KiB
Python
"""
|
|
Servicio de generación de contenido con DeepSeek API.
|
|
|
|
Este archivo mantiene la interfaz original (ContentGenerator) para
|
|
compatibilidad con código existente, pero internamente usa el nuevo
|
|
motor modular (ContentGeneratorV2) cuando está disponible.
|
|
|
|
Para nuevas integraciones, usar directamente:
|
|
from app.services.ai import ContentGeneratorV2, content_generator_v2
|
|
"""
|
|
|
|
import json
import re
from typing import Optional, List, Dict, Any

from openai import OpenAI
from sqlalchemy.orm import Session

from app.core.config import settings
|
|
|
|
# Importar nuevo motor
|
|
try:
|
|
from app.services.ai import (
|
|
ContentGeneratorV2,
|
|
content_generator_v2,
|
|
ContextEngine,
|
|
context_engine,
|
|
ContentValidator,
|
|
content_validator,
|
|
)
|
|
NEW_ENGINE_AVAILABLE = True
|
|
except ImportError:
|
|
NEW_ENGINE_AVAILABLE = False
|
|
|
|
|
|
class ContentGenerator:
    """
    Content generator backed by the DeepSeek API.

    This class keeps the original interface for compatibility with
    existing code. Internally it delegates to the new modular engine
    (ContentGeneratorV2) when that package imports successfully.

    For new integrations, use ContentGeneratorV2 directly.
    """
|
|
|
|
def __init__(self, use_new_engine: bool = True):
|
|
"""
|
|
Inicializar el generador.
|
|
|
|
Args:
|
|
use_new_engine: Si usar el nuevo motor v2 (default: True)
|
|
"""
|
|
self._client = None
|
|
self.model = "deepseek-chat"
|
|
self._use_new_engine = use_new_engine and NEW_ENGINE_AVAILABLE
|
|
|
|
if self._use_new_engine:
|
|
self._v2 = content_generator_v2
|
|
self._validator = content_validator
|
|
self._context = context_engine
|
|
else:
|
|
self._v2 = None
|
|
self._validator = None
|
|
self._context = None
|
|
|
|
@property
|
|
def client(self):
|
|
"""Lazy initialization del cliente OpenAI."""
|
|
if self._client is None:
|
|
if not settings.DEEPSEEK_API_KEY:
|
|
raise ValueError("DEEPSEEK_API_KEY no configurada. Configura la variable de entorno.")
|
|
self._client = OpenAI(
|
|
api_key=settings.DEEPSEEK_API_KEY,
|
|
base_url=settings.DEEPSEEK_BASE_URL
|
|
)
|
|
return self._client
|
|
|
|
    def _get_system_prompt(self) -> str:
        """Return the system prompt carrying the brand personality.

        Prefers the prompt shipped with the v2 engine's prompt library;
        any failure there (import or call) falls back to the inline
        legacy prompt below.
        """
        # If the new engine is active, try its prompt library first.
        if self._use_new_engine:
            try:
                from app.services.ai import prompt_library
                return prompt_library.get_system_prompt()
            except Exception:
                # Best-effort: silently fall through to the legacy prompt.
                pass

        # Fallback to the original prompt.
        return f"""Eres el Community Manager de {settings.BUSINESS_NAME}, una empresa de tecnología ubicada en {settings.BUSINESS_LOCATION}.

SOBRE LA EMPRESA:
- Especializada en soluciones de IA, automatización y transformación digital
- Vende equipos de cómputo e impresoras 3D
- Sitio web: {settings.BUSINESS_WEBSITE}

TONO DE COMUNICACIÓN:
{settings.CONTENT_TONE}

ESTILO (inspirado en @midudev, @MoureDev, @SoyDalto):
- Tips cortos y accionables
- Contenido educativo de valor
- Cercano pero profesional
- Uso moderado de emojis
- Hashtags relevantes (máximo 3-5)

REGLAS:
- Nunca uses lenguaje ofensivo
- No hagas promesas exageradas
- Sé honesto y transparente
- Enfócate en ayudar, no en vender directamente
- Adapta el contenido a cada plataforma"""
|
|
|
|
# === Métodos principales con nuevo motor ===
|
|
|
|
async def generate_tip_tech(
|
|
self,
|
|
category: str,
|
|
platform: str,
|
|
template: Optional[str] = None,
|
|
db: Optional[Session] = None,
|
|
validate: bool = True
|
|
) -> str:
|
|
"""
|
|
Generar un tip tech.
|
|
|
|
Args:
|
|
category: Categoría del tip
|
|
platform: Plataforma destino
|
|
template: Template opcional (ignorado en v2)
|
|
db: Sesión de DB para context engine
|
|
validate: Si validar el contenido generado
|
|
|
|
Returns:
|
|
Contenido del tip
|
|
"""
|
|
if self._use_new_engine:
|
|
result = await self._v2.generate_tip(
|
|
category=category,
|
|
platform=platform,
|
|
db=db
|
|
)
|
|
|
|
content = result["adapted_content"]
|
|
|
|
# Validar y regenerar si es necesario
|
|
if validate and self._validator:
|
|
quality = await self._validator.evaluate(content, platform)
|
|
|
|
if quality.final_decision == "regenerate":
|
|
# Regenerar con hints
|
|
hints = self._validator.get_regeneration_hints(quality)
|
|
result = await self._v2.generate(
|
|
template_name="tip_tech",
|
|
variables={
|
|
"category": category,
|
|
"difficulty_level": "principiante",
|
|
"target_audience": "profesionales tech"
|
|
},
|
|
platform=platform,
|
|
db=db,
|
|
temperature_override=0.9 # Más creatividad en retry
|
|
)
|
|
content = result["adapted_content"]
|
|
|
|
return content
|
|
|
|
# Fallback a implementación original
|
|
return await self._generate_tip_tech_legacy(category, platform, template)
|
|
|
|
async def _generate_tip_tech_legacy(
|
|
self,
|
|
category: str,
|
|
platform: str,
|
|
template: Optional[str] = None
|
|
) -> str:
|
|
"""Implementación original de generate_tip_tech."""
|
|
char_limits = {
|
|
"x": 280,
|
|
"threads": 500,
|
|
"instagram": 2200,
|
|
"facebook": 500
|
|
}
|
|
|
|
prompt = f"""Genera un tip de tecnología para la categoría: {category}
|
|
|
|
PLATAFORMA: {platform}
|
|
LÍMITE DE CARACTERES: {char_limits.get(platform, 500)}
|
|
|
|
{f'USA ESTE TEMPLATE COMO BASE: {template}' if template else ''}
|
|
|
|
REQUISITOS:
|
|
- Tip práctico y accionable
|
|
- Fácil de entender
|
|
- Incluye un emoji relevante al inicio
|
|
- Termina con 2-3 hashtags relevantes
|
|
- NO incluyas enlaces
|
|
|
|
Responde SOLO con el texto del post, sin explicaciones."""
|
|
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages=[
|
|
{"role": "system", "content": self._get_system_prompt()},
|
|
{"role": "user", "content": prompt}
|
|
],
|
|
max_tokens=300,
|
|
temperature=0.7
|
|
)
|
|
|
|
return response.choices[0].message.content.strip()
|
|
|
|
async def generate_product_post(
|
|
self,
|
|
product: Dict,
|
|
platform: str,
|
|
db: Optional[Session] = None,
|
|
validate: bool = True
|
|
) -> str:
|
|
"""
|
|
Generar post para un producto.
|
|
|
|
Args:
|
|
product: Dict con datos del producto
|
|
platform: Plataforma destino
|
|
db: Sesión de DB
|
|
validate: Si validar contenido
|
|
|
|
Returns:
|
|
Contenido del post
|
|
"""
|
|
if self._use_new_engine:
|
|
result = await self._v2.generate_product_post(
|
|
product=product,
|
|
platform=platform,
|
|
db=db
|
|
)
|
|
|
|
content = result["adapted_content"]
|
|
|
|
if validate and self._validator:
|
|
quality = await self._validator.evaluate(content, platform)
|
|
if quality.final_decision == "regenerate":
|
|
result = await self._v2.generate_product_post(
|
|
product=product,
|
|
platform=platform,
|
|
db=db,
|
|
temperature_override=0.9
|
|
)
|
|
content = result["adapted_content"]
|
|
|
|
return content
|
|
|
|
# Fallback a implementación original
|
|
return await self._generate_product_post_legacy(product, platform)
|
|
|
|
async def _generate_product_post_legacy(
|
|
self,
|
|
product: Dict,
|
|
platform: str
|
|
) -> str:
|
|
"""Implementación original de generate_product_post."""
|
|
char_limits = {
|
|
"x": 280,
|
|
"threads": 500,
|
|
"instagram": 2200,
|
|
"facebook": 1000
|
|
}
|
|
|
|
prompt = f"""Genera un post promocional para este producto:
|
|
|
|
PRODUCTO: {product['name']}
|
|
DESCRIPCIÓN: {product.get('description', 'N/A')}
|
|
PRECIO: ${product['price']:,.2f} MXN
|
|
CATEGORÍA: {product['category']}
|
|
ESPECIFICACIONES: {json.dumps(product.get('specs', {}), ensure_ascii=False)}
|
|
PUNTOS DESTACADOS: {', '.join(product.get('highlights', []))}
|
|
|
|
PLATAFORMA: {platform}
|
|
LÍMITE DE CARACTERES: {char_limits.get(platform, 500)}
|
|
|
|
REQUISITOS:
|
|
- Destaca los beneficios principales
|
|
- Incluye el precio
|
|
- Usa emojis relevantes
|
|
- Incluye CTA sutil (ej: "Contáctanos", "Más info en DM")
|
|
- Termina con 2-3 hashtags
|
|
- NO inventes especificaciones
|
|
|
|
Responde SOLO con el texto del post."""
|
|
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages=[
|
|
{"role": "system", "content": self._get_system_prompt()},
|
|
{"role": "user", "content": prompt}
|
|
],
|
|
max_tokens=400,
|
|
temperature=0.7
|
|
)
|
|
|
|
return response.choices[0].message.content.strip()
|
|
|
|
async def generate_service_post(
|
|
self,
|
|
service: Dict,
|
|
platform: str,
|
|
db: Optional[Session] = None,
|
|
validate: bool = True
|
|
) -> str:
|
|
"""
|
|
Generar post para un servicio.
|
|
|
|
Args:
|
|
service: Dict con datos del servicio
|
|
platform: Plataforma destino
|
|
db: Sesión de DB
|
|
validate: Si validar contenido
|
|
|
|
Returns:
|
|
Contenido del post
|
|
"""
|
|
if self._use_new_engine:
|
|
result = await self._v2.generate_service_post(
|
|
service=service,
|
|
platform=platform,
|
|
db=db
|
|
)
|
|
|
|
content = result["adapted_content"]
|
|
|
|
if validate and self._validator:
|
|
quality = await self._validator.evaluate(content, platform)
|
|
if quality.final_decision == "regenerate":
|
|
result = await self._v2.generate_service_post(
|
|
service=service,
|
|
platform=platform,
|
|
db=db,
|
|
temperature_override=0.9
|
|
)
|
|
content = result["adapted_content"]
|
|
|
|
return content
|
|
|
|
# Fallback
|
|
return await self._generate_service_post_legacy(service, platform)
|
|
|
|
async def _generate_service_post_legacy(
|
|
self,
|
|
service: Dict,
|
|
platform: str
|
|
) -> str:
|
|
"""Implementación original de generate_service_post."""
|
|
char_limits = {
|
|
"x": 280,
|
|
"threads": 500,
|
|
"instagram": 2200,
|
|
"facebook": 1000
|
|
}
|
|
|
|
prompt = f"""Genera un post promocional para este servicio:
|
|
|
|
SERVICIO: {service['name']}
|
|
DESCRIPCIÓN: {service.get('description', 'N/A')}
|
|
CATEGORÍA: {service['category']}
|
|
SECTORES OBJETIVO: {', '.join(service.get('target_sectors', []))}
|
|
BENEFICIOS: {', '.join(service.get('benefits', []))}
|
|
CTA: {service.get('call_to_action', 'Contáctanos para más información')}
|
|
|
|
PLATAFORMA: {platform}
|
|
LÍMITE DE CARACTERES: {char_limits.get(platform, 500)}
|
|
|
|
REQUISITOS:
|
|
- Enfócate en el problema que resuelve
|
|
- Destaca 2-3 beneficios clave
|
|
- Usa emojis relevantes (✅, 🚀, 💡)
|
|
- Incluye el CTA
|
|
- Termina con 2-3 hashtags
|
|
- Tono consultivo, no vendedor
|
|
|
|
Responde SOLO con el texto del post."""
|
|
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages=[
|
|
{"role": "system", "content": self._get_system_prompt()},
|
|
{"role": "user", "content": prompt}
|
|
],
|
|
max_tokens=400,
|
|
temperature=0.7
|
|
)
|
|
|
|
return response.choices[0].message.content.strip()
|
|
|
|
async def generate_thread(
|
|
self,
|
|
topic: str,
|
|
num_posts: int = 5,
|
|
db: Optional[Session] = None
|
|
) -> List[str]:
|
|
"""
|
|
Generar un hilo educativo.
|
|
|
|
Args:
|
|
topic: Tema del hilo
|
|
num_posts: Número de posts
|
|
db: Sesión de DB
|
|
|
|
Returns:
|
|
Lista de posts del hilo
|
|
"""
|
|
if self._use_new_engine:
|
|
return await self._v2.generate_thread(
|
|
topic=topic,
|
|
num_posts=num_posts,
|
|
db=db
|
|
)
|
|
|
|
# Fallback
|
|
return await self._generate_thread_legacy(topic, num_posts)
|
|
|
|
async def _generate_thread_legacy(
|
|
self,
|
|
topic: str,
|
|
num_posts: int = 5
|
|
) -> List[str]:
|
|
"""Implementación original de generate_thread."""
|
|
prompt = f"""Genera un hilo educativo de {num_posts} posts sobre: {topic}
|
|
|
|
REQUISITOS:
|
|
- Post 1: Gancho que capture atención
|
|
- Posts 2-{num_posts-1}: Contenido educativo de valor
|
|
- Post {num_posts}: Conclusión con CTA
|
|
|
|
FORMATO:
|
|
- Cada post máximo 280 caracteres
|
|
- Numera cada post (1/, 2/, etc.)
|
|
- Usa emojis relevantes
|
|
- El último post incluye hashtags
|
|
|
|
Responde con cada post en una línea separada, sin explicaciones."""
|
|
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages=[
|
|
{"role": "system", "content": self._get_system_prompt()},
|
|
{"role": "user", "content": prompt}
|
|
],
|
|
max_tokens=1500,
|
|
temperature=0.7
|
|
)
|
|
|
|
content = response.choices[0].message.content.strip()
|
|
posts = [p.strip() for p in content.split('\n') if p.strip()]
|
|
|
|
return posts
|
|
|
|
async def generate_response_suggestion(
|
|
self,
|
|
interaction_content: str,
|
|
interaction_type: str,
|
|
context: Optional[str] = None
|
|
) -> List[str]:
|
|
"""
|
|
Generar sugerencias de respuesta para una interacción.
|
|
|
|
Args:
|
|
interaction_content: Contenido de la interacción
|
|
interaction_type: Tipo de interacción
|
|
context: Contexto adicional
|
|
|
|
Returns:
|
|
Lista de 3 opciones de respuesta
|
|
"""
|
|
if self._use_new_engine:
|
|
return await self._v2.generate_response(
|
|
interaction_content=interaction_content,
|
|
interaction_type=interaction_type,
|
|
context=context
|
|
)
|
|
|
|
# Fallback
|
|
return await self._generate_response_legacy(
|
|
interaction_content, interaction_type, context
|
|
)
|
|
|
|
async def _generate_response_legacy(
|
|
self,
|
|
interaction_content: str,
|
|
interaction_type: str,
|
|
context: Optional[str] = None
|
|
) -> List[str]:
|
|
"""Implementación original de generate_response_suggestion."""
|
|
prompt = f"""Un usuario escribió esto en redes sociales:
|
|
|
|
"{interaction_content}"
|
|
|
|
TIPO DE INTERACCIÓN: {interaction_type}
|
|
{f'CONTEXTO ADICIONAL: {context}' if context else ''}
|
|
|
|
Genera 3 opciones de respuesta diferentes:
|
|
1. Respuesta corta y amigable
|
|
2. Respuesta que invite a continuar la conversación
|
|
3. Respuesta que dirija a más información/contacto
|
|
|
|
REQUISITOS:
|
|
- Máximo 280 caracteres cada una
|
|
- Tono amigable y profesional
|
|
- Si es una queja, sé empático
|
|
- Si es una pregunta técnica, sé útil
|
|
|
|
Responde con las 3 opciones numeradas, una por línea."""
|
|
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages=[
|
|
{"role": "system", "content": self._get_system_prompt()},
|
|
{"role": "user", "content": prompt}
|
|
],
|
|
max_tokens=500,
|
|
temperature=0.8
|
|
)
|
|
|
|
content = response.choices[0].message.content.strip()
|
|
suggestions = [s.strip() for s in content.split('\n') if s.strip()]
|
|
|
|
cleaned = []
|
|
for s in suggestions:
|
|
if s[0].isdigit() and (s[1] == '.' or s[1] == ')'):
|
|
s = s[2:].strip()
|
|
cleaned.append(s)
|
|
|
|
return cleaned[:3]
|
|
|
|
async def adapt_content_for_platform(
|
|
self,
|
|
content: str,
|
|
target_platform: str
|
|
) -> str:
|
|
"""
|
|
Adaptar contenido existente a una plataforma específica.
|
|
|
|
Args:
|
|
content: Contenido original
|
|
target_platform: Plataforma destino
|
|
|
|
Returns:
|
|
Contenido adaptado
|
|
"""
|
|
if self._use_new_engine:
|
|
# Detectar plataforma de origen (asumimos la más genérica)
|
|
return await self._v2.adapt_content(
|
|
content=content,
|
|
source_platform="instagram", # Asume origen genérico
|
|
target_platform=target_platform
|
|
)
|
|
|
|
# Fallback
|
|
return await self._adapt_content_legacy(content, target_platform)
|
|
|
|
async def _adapt_content_legacy(
|
|
self,
|
|
content: str,
|
|
target_platform: str
|
|
) -> str:
|
|
"""Implementación original de adapt_content_for_platform."""
|
|
char_limits = {
|
|
"x": 280,
|
|
"threads": 500,
|
|
"instagram": 2200,
|
|
"facebook": 1000
|
|
}
|
|
|
|
prompt = f"""Adapta este contenido para {target_platform}:
|
|
|
|
CONTENIDO ORIGINAL:
|
|
{content}
|
|
|
|
LÍMITE DE CARACTERES: {char_limits.get(target_platform, 500)}
|
|
|
|
REQUISITOS PARA {target_platform.upper()}:
|
|
{"- Muy conciso, directo al punto" if target_platform == "x" else ""}
|
|
{"- Puede ser más extenso, incluir más contexto" if target_platform == "instagram" else ""}
|
|
{"- Tono más casual y cercano" if target_platform == "threads" else ""}
|
|
{"- Puede incluir links, más profesional" if target_platform == "facebook" else ""}
|
|
- Mantén la esencia del mensaje
|
|
- Ajusta hashtags según la plataforma
|
|
|
|
Responde SOLO con el contenido adaptado."""
|
|
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages=[
|
|
{"role": "system", "content": self._get_system_prompt()},
|
|
{"role": "user", "content": prompt}
|
|
],
|
|
max_tokens=400,
|
|
temperature=0.6
|
|
)
|
|
|
|
return response.choices[0].message.content.strip()
|
|
|
|
# === Nuevos métodos (solo v2) ===
|
|
|
|
async def generate_with_quality_check(
|
|
self,
|
|
template_name: str,
|
|
variables: Dict[str, Any],
|
|
platform: str,
|
|
db: Optional[Session] = None,
|
|
max_attempts: int = 2
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Generar contenido con validación y regeneración automática.
|
|
|
|
Solo disponible con el nuevo motor.
|
|
|
|
Args:
|
|
template_name: Nombre del template
|
|
variables: Variables para el template
|
|
platform: Plataforma destino
|
|
db: Sesión de DB
|
|
max_attempts: Máximo intentos de regeneración
|
|
|
|
Returns:
|
|
Dict con contenido, score, y metadata
|
|
"""
|
|
if not self._use_new_engine:
|
|
raise RuntimeError(
|
|
"Este método requiere el nuevo motor. "
|
|
"Asegúrate de que app.services.ai esté disponible."
|
|
)
|
|
|
|
attempt = 0
|
|
temperature = 0.7
|
|
is_thread = template_name == "thread"
|
|
|
|
while attempt < max_attempts:
|
|
attempt += 1
|
|
|
|
# Generar
|
|
result = await self._v2.generate(
|
|
template_name=template_name,
|
|
variables=variables,
|
|
platform=platform,
|
|
db=db,
|
|
temperature_override=temperature
|
|
)
|
|
|
|
# Para hilos, usar contenido original (no truncado)
|
|
if is_thread:
|
|
content = result["content"]
|
|
else:
|
|
content = result["adapted_content"]
|
|
|
|
# Evaluar calidad (para hilos, evaluar solo el primer post como muestra)
|
|
content_to_evaluate = content
|
|
if is_thread and result.get("thread_posts"):
|
|
# Evaluar el hook (primer post) como indicador de calidad
|
|
first_post = result["thread_posts"][0]["content"]
|
|
content_to_evaluate = first_post
|
|
|
|
quality = await self._validator.evaluate(content_to_evaluate, platform)
|
|
|
|
# Si pasa, retornar
|
|
if quality.final_decision == "accept":
|
|
response = {
|
|
"content": content,
|
|
"quality_score": quality.scoring.total_score if quality.scoring else None,
|
|
"score_breakdown": quality.scoring.breakdown if quality.scoring else None,
|
|
"is_top_performer": quality.scoring.is_top_performer if quality.scoring else False,
|
|
"attempts": attempt,
|
|
"metadata": result["metadata"],
|
|
"is_thread": is_thread,
|
|
}
|
|
|
|
# Agregar posts del hilo si aplica
|
|
if is_thread:
|
|
response["thread_posts"] = result.get("thread_posts", [])
|
|
|
|
return response
|
|
|
|
# Si debe regenerar, aumentar temperature
|
|
temperature = min(1.0, temperature + 0.1)
|
|
|
|
# Si llegamos aquí, usar el último intento aunque no sea ideal
|
|
response = {
|
|
"content": content,
|
|
"quality_score": quality.scoring.total_score if quality.scoring else None,
|
|
"score_breakdown": quality.scoring.breakdown if quality.scoring else None,
|
|
"is_top_performer": False,
|
|
"attempts": attempt,
|
|
"metadata": result["metadata"],
|
|
"warning": "Contenido aceptado después de máximos intentos",
|
|
"is_thread": is_thread,
|
|
}
|
|
|
|
if is_thread:
|
|
response["thread_posts"] = result.get("thread_posts", [])
|
|
|
|
return response
|
|
|
|
async def save_to_memory(
|
|
self,
|
|
db: Session,
|
|
post_id: int,
|
|
content: str,
|
|
content_type: str,
|
|
platform: str,
|
|
quality_score: Optional[int] = None,
|
|
quality_breakdown: Optional[Dict] = None
|
|
):
|
|
"""
|
|
Guardar contenido en memoria para tracking.
|
|
|
|
Solo disponible con el nuevo motor.
|
|
|
|
Args:
|
|
db: Sesión de DB
|
|
post_id: ID del post
|
|
content: Contenido generado
|
|
content_type: Tipo de contenido
|
|
platform: Plataforma
|
|
quality_score: Score de calidad
|
|
quality_breakdown: Breakdown del score
|
|
"""
|
|
if not self._use_new_engine:
|
|
return # Silenciosamente ignorar si no hay nuevo motor
|
|
|
|
self._context.save_to_memory(
|
|
db=db,
|
|
post_id=post_id,
|
|
content=content,
|
|
content_type=content_type,
|
|
platform=platform,
|
|
quality_score=quality_score,
|
|
quality_breakdown=quality_breakdown
|
|
)
|
|
|
|
|
|
# Global instance (module-level singleton kept for legacy callers)
content_generator = ContentGenerator()
|