feat(fase4): add AI response and sentiment nodes
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -5,6 +5,11 @@ from functools import lru_cache
|
|||||||
class Settings(BaseSettings):
|
class Settings(BaseSettings):
|
||||||
DATABASE_URL: str = "postgresql://whatsapp_admin:password@localhost:5432/whatsapp_central"
|
DATABASE_URL: str = "postgresql://whatsapp_admin:password@localhost:5432/whatsapp_central"
|
||||||
REDIS_URL: str = "redis://localhost:6379"
|
REDIS_URL: str = "redis://localhost:6379"
|
||||||
|
|
||||||
|
# OpenAI
|
||||||
|
OPENAI_API_KEY: str = ""
|
||||||
|
OPENAI_MODEL: str = "gpt-3.5-turbo"
|
||||||
|
|
||||||
API_GATEWAY_URL: str = "http://localhost:8000"
|
API_GATEWAY_URL: str = "http://localhost:8000"
|
||||||
WHATSAPP_CORE_URL: str = "http://localhost:3001"
|
WHATSAPP_CORE_URL: str = "http://localhost:3001"
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,18 @@
|
|||||||
|
from app.nodes.advanced import (
|
||||||
|
DelayExecutor,
|
||||||
|
GoToExecutor,
|
||||||
|
LoopExecutor,
|
||||||
|
RandomExecutor,
|
||||||
|
SwitchExecutor,
|
||||||
|
)
|
||||||
|
from app.nodes.ai import AIResponseExecutor, AISentimentExecutor
|
||||||
from app.nodes.base import NodeExecutor, NodeRegistry
|
from app.nodes.base import NodeExecutor, NodeRegistry
|
||||||
from app.nodes.basic import (
|
from app.nodes.basic import (
|
||||||
TriggerExecutor, MessageExecutor, ButtonsExecutor,
|
ButtonsExecutor,
|
||||||
WaitInputExecutor, SetVariableExecutor, ConditionExecutor
|
ConditionExecutor,
|
||||||
|
MessageExecutor,
|
||||||
|
SetVariableExecutor,
|
||||||
|
TriggerExecutor,
|
||||||
|
WaitInputExecutor,
|
||||||
)
|
)
|
||||||
from app.nodes.script import JavaScriptExecutor, HttpRequestExecutor
|
from app.nodes.script import HttpRequestExecutor, JavaScriptExecutor
|
||||||
|
|||||||
117
services/flow-engine/app/nodes/ai.py
Normal file
117
services/flow-engine/app/nodes/ai.py
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
"""AI nodes: OpenAI-backed response generation and sentiment analysis."""

from typing import Any, Optional

import httpx

from app.config import get_settings
from app.context import FlowContext
from app.nodes.base import NodeExecutor

# Module-level singleton settings (get_settings is lru_cached in app.config),
# shared by both executor classes below.
settings = get_settings()
|
||||||
|
|
||||||
|
|
||||||
|
class AIResponseExecutor(NodeExecutor):
    """Generate an AI reply via the OpenAI chat-completions API.

    Config keys:
        prompt (str): user prompt, interpolated against the flow context.
        system_prompt (str): system message (defaults to a Spanish
            "helpful assistant" prompt).
        output_variable (str): context variable receiving the reply
            (default ``"_ai_response"``).
        max_tokens (int): completion token cap (default 500).
        temperature (float): sampling temperature (default 0.7).
        include_history (bool): when True, splice ``_conversation_history``
            messages between the system and user turns.

    Returns:
        ``"success"`` when a reply was stored in *output_variable*;
        ``"error"`` otherwise (missing API key, empty prompt, HTTP or
        network failure, malformed response) — failure details go to
        the ``_ai_error`` context variable where available.
    """

    async def execute(
        self, config: dict, context: FlowContext, session: Any
    ) -> Optional[str]:
        prompt = context.interpolate(config.get("prompt", ""))
        # Runtime string — default system prompt is intentionally Spanish.
        system_prompt = config.get("system_prompt", "Eres un asistente útil.")
        output_variable = config.get("output_variable", "_ai_response")
        max_tokens = config.get("max_tokens", 500)
        temperature = config.get("temperature", 0.7)

        # Guard clauses: no key / no prompt -> route to the "error" edge.
        if not settings.OPENAI_API_KEY:
            context.set("_ai_error", "OpenAI API key not configured")
            return "error"

        if not prompt:
            return "error"

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]

        if config.get("include_history", False):
            # Rebuild with prior turns between the system and user messages.
            # History shape is presumably a list of {"role", "content"}
            # dicts — maintained by another node; confirm against writer.
            history = context.get("_conversation_history") or []
            messages = (
                [{"role": "system", "content": system_prompt}]
                + history
                + [{"role": "user", "content": prompt}]
            )

        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    "https://api.openai.com/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {settings.OPENAI_API_KEY}",
                        "Content-Type": "application/json",
                    },
                    json={
                        "model": settings.OPENAI_MODEL,
                        "messages": messages,
                        "max_tokens": max_tokens,
                        "temperature": temperature,
                    },
                    timeout=30,
                )
        except httpx.HTTPError as exc:
            # Network/timeout problems must take the node's "error" edge
            # instead of crashing the whole flow run.
            context.set("_ai_error", str(exc))
            return "error"

        if response.status_code != 200:
            context.set("_ai_error", response.text)
            return "error"

        try:
            data = response.json()
            ai_response = data["choices"][0]["message"]["content"]
        except (ValueError, LookupError, TypeError) as exc:
            # Defensive: unparseable body or unexpected payload shape.
            context.set("_ai_error", f"Malformed OpenAI response: {exc}")
            return "error"

        context.set(output_variable, ai_response)
        return "success"
|
||||||
|
|
||||||
|
|
||||||
|
class AISentimentExecutor(NodeExecutor):
    """Classify a message's sentiment via the OpenAI chat-completions API.

    Config keys:
        variable (str): context variable holding the text to analyze;
            falls back to the incoming message's ``content`` field.
        output_variable (str): context variable receiving the raw model
            label (default ``"_sentiment"``).

    Returns:
        ``"positive"``, ``"negative"``, or ``"neutral"``. Any failure
        (no API key, empty text, network error, non-200 status, malformed
        payload) degrades gracefully to ``"neutral"`` — this node never
        aborts the flow.
    """

    async def execute(
        self, config: dict, context: FlowContext, session: Any
    ) -> Optional[str]:
        text = context.get(config.get("variable", "")) or context.message.get(
            "content", ""
        )
        output_variable = config.get("output_variable", "_sentiment")

        # Nothing to classify / no credentials -> neutral fallback.
        if not settings.OPENAI_API_KEY or not text:
            return "neutral"

        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    "https://api.openai.com/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {settings.OPENAI_API_KEY}",
                        "Content-Type": "application/json",
                    },
                    json={
                        # Use the configured model (default is gpt-3.5-turbo)
                        # for consistency with AIResponseExecutor.
                        "model": settings.OPENAI_MODEL,
                        "messages": [
                            {
                                "role": "system",
                                "content": "Analyze the sentiment. Reply with only one word: positive, negative, or neutral",
                            },
                            {"role": "user", "content": text},
                        ],
                        "max_tokens": 10,
                        "temperature": 0,
                    },
                    timeout=15,
                )
        except httpx.HTTPError:
            # Network/timeout failure: degrade to neutral, matching the
            # node's other fallback paths.
            return "neutral"

        if response.status_code != 200:
            return "neutral"

        try:
            data = response.json()
            sentiment = data["choices"][0]["message"]["content"].lower().strip()
        except (ValueError, LookupError, AttributeError):
            # Unparseable body or unexpected payload shape.
            return "neutral"

        context.set(output_variable, sentiment)

        # Substring match tolerates minor model verbosity around the label.
        if "positive" in sentiment:
            return "positive"
        if "negative" in sentiment:
            return "negative"
        return "neutral"
|
||||||
Reference in New Issue
Block a user