Phase 1 - Analytics and Reports: - PostMetrics and AnalyticsReport models for tracking engagement - Analytics service with dashboard stats, top posts, optimal times - 8 API endpoints at /api/analytics/* - Interactive dashboard with Chart.js charts - Celery tasks for metrics fetch (15min) and weekly reports Phase 2 - Odoo Integration: - Lead and OdooSyncLog models for CRM integration - Odoo fields added to Product and Service models - XML-RPC service for bidirectional sync - Lead management API at /api/leads/* - Leads dashboard template - Celery tasks for product/service sync and lead export Phase 3 - A/B Testing and Recycling: - ABTest, ABTestVariant, RecycledPost models - Statistical winner analysis using chi-square test - Content recycling with engagement-based scoring - APIs at /api/ab-tests/* and /api/recycling/* - Automated test evaluation and content recycling tasks Phase 4 - Thread Series and Templates: - ThreadSeries and ThreadPost models for multi-post threads - AI-powered thread generation - Enhanced ImageTemplate with HTML template support - APIs at /api/threads/* and /api/templates/* - Thread scheduling with reply chain support Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
217 lines
5.4 KiB
Python
217 lines
5.4 KiB
Python
"""
|
|
API Routes for A/B Testing.
|
|
"""
|
|
|
|
from typing import List, Optional
|
|
from fastapi import APIRouter, Depends, HTTPException, Query
|
|
from sqlalchemy.orm import Session
|
|
from pydantic import BaseModel
|
|
|
|
from app.core.database import get_db
|
|
from app.services.ab_testing_service import ab_testing_service
|
|
|
|
|
|
router = APIRouter()
|
|
|
|
|
|
class VariantCreate(BaseModel):
    """Schema for creating a test variant.

    One variant describes a single piece of content that will be posted
    and measured against its siblings in an A/B test.
    """
    # Short variant identifier shown in results.
    name: str  # A, B, C, etc.
    # The content body this variant will publish.
    content: str
    # Optional hashtag list attached to the variant.
    hashtags: Optional[List[str]] = None
    # Optional image URL attached to the variant's post.
    image_url: Optional[str] = None
|
|
|
|
|
|
class ABTestCreate(BaseModel):
    """Schema for creating an A/B test.

    Payload for POST /; variant-count limits (2-4) are enforced in the
    create endpoint rather than here, so clients get a 400 instead of a 422.
    """
    # Human-readable test name.
    name: str
    # Target platform the variants will be posted to.
    platform: str
    # The competing variants (2-4, validated in the endpoint).
    variants: List[VariantCreate]
    # What aspect is being tested; defaults to content-level testing.
    test_type: str = "content"
    # How long the test runs before evaluation.
    duration_hours: int = 24
    # Minimum sample size before a winner can be declared -- presumably
    # impressions/interactions; the service defines the exact semantics.
    min_sample_size: int = 100
    # Metric used to pick the winner.
    success_metric: str = "engagement_rate"
    # Optional free-form description of the test.
    description: Optional[str] = None
|
|
|
|
|
|
@router.get("/")
async def list_tests(
    status: Optional[str] = None,
    platform: Optional[str] = None,
    limit: int = Query(20, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """
    List all A/B tests.

    - **status**: Filter by status (draft, running, completed, cancelled)
    - **platform**: Filter by platform
    """
    # Delegate filtering/pagination entirely to the A/B testing service.
    matching = await ab_testing_service.get_tests(
        status=status, platform=platform, limit=limit
    )
    return {"tests": matching, "count": len(matching)}
|
|
|
|
|
|
@router.get("/{test_id}")
async def get_test(
    test_id: int,
    db: Session = Depends(get_db)
):
    """
    Get a specific A/B test with its variants.
    """
    found = await ab_testing_service.get_test(test_id)
    # Falsy result (None/empty) means the test does not exist.
    if not found:
        raise HTTPException(status_code=404, detail="Test not found")
    return found
|
|
|
|
|
|
@router.post("/")
async def create_test(
    test_data: ABTestCreate,
    db: Session = Depends(get_db)
):
    """
    Create a new A/B test.

    Requires at least 2 variants (maximum 4). Variants should have:
    - **name**: Identifier (A, B, C, etc.)
    - **content**: The content to test
    - **hashtags**: Optional hashtag list
    - **image_url**: Optional image URL

    Raises:
        HTTPException 400: fewer than 2 or more than 4 variants supplied.
        HTTPException 500: the A/B testing service failed to create the test.
    """
    # Guard clauses: enforce variant-count limits here (not in the Pydantic
    # schema) so clients receive a 400 rather than a 422 validation error.
    if len(test_data.variants) < 2:
        raise HTTPException(
            status_code=400,
            detail="A/B test requires at least 2 variants"
        )

    if len(test_data.variants) > 4:
        raise HTTPException(
            status_code=400,
            detail="Maximum 4 variants allowed per test"
        )

    try:
        test = await ab_testing_service.create_test(
            name=test_data.name,
            platform=test_data.platform,
            # .dict() is the pydantic v1 API; switch to .model_dump() if the
            # project upgrades to pydantic v2.
            variants=[v.dict() for v in test_data.variants],
            test_type=test_data.test_type,
            duration_hours=test_data.duration_hours,
            min_sample_size=test_data.min_sample_size,
            success_metric=test_data.success_metric,
            description=test_data.description
        )
        payload = test.to_dict()
    except Exception as e:
        # Chain the original exception (`from e`) so logs keep the real
        # traceback instead of only the flattened message.
        raise HTTPException(status_code=500, detail=str(e)) from e

    return {
        "message": "A/B test created successfully",
        "test": payload
    }
|
|
|
|
|
|
@router.post("/{test_id}/start")
async def start_test(
    test_id: int,
    db: Session = Depends(get_db)
):
    """
    Start an A/B test.

    This will create and schedule posts for each variant.
    """
    outcome = await ab_testing_service.start_test(test_id)

    # Success path first; failures become a 400 with the service's reason.
    if outcome.get("success"):
        return {"message": "A/B test started successfully", **outcome}

    raise HTTPException(
        status_code=400,
        detail=outcome.get("error", "Failed to start test")
    )
|
|
|
|
|
|
@router.post("/{test_id}/evaluate")
async def evaluate_test(
    test_id: int,
    db: Session = Depends(get_db)
):
    """
    Evaluate A/B test results and determine winner.

    Updates metrics from posts and calculates statistical significance.
    """
    outcome = await ab_testing_service.evaluate_test(test_id)

    # The service reports failure via a falsy/absent "success" flag.
    if outcome.get("success"):
        return outcome

    raise HTTPException(
        status_code=400,
        detail=outcome.get("error", "Failed to evaluate test")
    )
|
|
|
|
|
|
@router.get("/{test_id}/results")
async def get_test_results(
    test_id: int,
    db: Session = Depends(get_db)
):
    """
    Get current results for an A/B test.
    """
    # Refresh variant metrics before reading the standings.
    await ab_testing_service.update_variant_metrics(test_id)

    test = await ab_testing_service.get_test(test_id)
    if not test:
        raise HTTPException(status_code=404, detail="Test not found")

    # Rank variants by engagement rate, best first. sorted() on an empty
    # list is simply [], so no empty-case branch is needed.
    standings = sorted(
        test.get("variants", []),
        key=lambda variant: variant.get("engagement_rate", 0),
        reverse=True,
    )

    return {
        "test_id": test_id,
        "status": test.get("status"),
        "started_at": test.get("started_at"),
        "winning_variant_id": test.get("winning_variant_id"),
        "confidence_level": test.get("confidence_level"),
        "variants": standings,
    }
|
|
|
|
|
|
@router.post("/{test_id}/cancel")
async def cancel_test(
    test_id: int,
    db: Session = Depends(get_db)
):
    """
    Cancel a running A/B test.
    """
    outcome = await ab_testing_service.cancel_test(test_id)

    # Map a failed cancellation to a 400 carrying the service's reason.
    if outcome.get("success"):
        return {"message": "Test cancelled successfully"}

    raise HTTPException(
        status_code=400,
        detail=outcome.get("error", "Failed to cancel test")
    )
|