feat(phase-6): Complete testing and deployment setup
Testing: - Add pytest configuration (pytest.ini) - Add test fixtures (tests/conftest.py) - Add ContentGenerator tests (13 tests) - Add ContentScheduler tests (16 tests) - Add PublisherManager tests (16 tests) - All 45 tests passing Production Docker: - Add docker-compose.prod.yml with healthchecks, resource limits - Add Dockerfile.prod with multi-stage build, non-root user - Add nginx.prod.conf with SSL, rate limiting, security headers - Add .env.prod.example template Maintenance Scripts: - Add backup.sh for database and media backups - Add restore.sh for database restoration - Add cleanup.sh for log rotation and Docker cleanup - Add healthcheck.sh with Telegram alerts Documentation: - Add DEPLOY.md with complete deployment guide Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
121
scripts/maintenance/backup.sh
Executable file
121
scripts/maintenance/backup.sh
Executable file
@@ -0,0 +1,121 @@
|
||||
#!/bin/bash
# ===========================================
# Backup Script for Social Media Automation
#
# Creates a compressed dump of the PostgreSQL database and a tarball
# of the uploads/ media directory, then prunes backups older than
# RETENTION_DAYS. Run daily via cron:
#   0 2 * * * /path/to/backup.sh >> /var/log/backup.log 2>&1
#
# Environment overrides:
#   BACKUP_DIR      - backup destination root
#   RETENTION_DAYS  - days of backups to keep (default: 7)
#   CONTAINER_NAME  - PostgreSQL container name
# ===========================================

# -e: abort on error; -u: unset variables are errors; -o pipefail: a
# pg_dump failure piped into gzip must fail the pipeline — without it,
# gzip's exit 0 masks the failure and an empty/corrupt archive is
# reported as a successful backup.
set -euo pipefail

# Configuration
BACKUP_DIR="${BACKUP_DIR:-/root/Facebook-X-Threads-Automation/backups}"
RETENTION_DAYS="${RETENTION_DAYS:-7}"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
CONTAINER_NAME="${CONTAINER_NAME:-social-automation-db}"

# Colors for output (expanded by echo -e in log())
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Print a timestamped message.
log() {
  echo -e "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Log in red and abort the script.
error() {
  log "${RED}ERROR: $1${NC}"
  exit 1
}

success() {
  log "${GREEN}$1${NC}"
}

warning() {
  log "${YELLOW}$1${NC}"
}

# Create backup directories if they do not exist
mkdir -p "$BACKUP_DIR"/{database,media}

log "Starting backup process..."

# ===========================================
# 1. DATABASE BACKUP
# ===========================================
log "Backing up PostgreSQL database..."

DB_BACKUP_FILE="$BACKUP_DIR/database/db_backup_$TIMESTAMP.sql.gz"

# Check if container is running
if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
  error "Database container '$CONTAINER_NAME' is not running"
fi

# Get database credentials from the container; fall back to the
# project defaults when the env vars are not set inside it.
POSTGRES_USER=$(docker exec "$CONTAINER_NAME" printenv POSTGRES_USER 2>/dev/null || echo "social_user")
POSTGRES_DB=$(docker exec "$CONTAINER_NAME" printenv POSTGRES_DB 2>/dev/null || echo "social_automation")

# Perform backup (pipefail above ensures a pg_dump failure reaches
# the else branch instead of being swallowed by gzip)
if docker exec "$CONTAINER_NAME" pg_dump -U "$POSTGRES_USER" "$POSTGRES_DB" | gzip > "$DB_BACKUP_FILE"; then
  DB_SIZE=$(du -h "$DB_BACKUP_FILE" | cut -f1)
  success "Database backup completed: $DB_BACKUP_FILE ($DB_SIZE)"
else
  # Remove the partial file so a truncated archive is never mistaken
  # for a valid backup by restore.sh or the retention logic.
  rm -f -- "$DB_BACKUP_FILE"
  error "Database backup failed"
fi

# ===========================================
# 2. MEDIA FILES BACKUP
# ===========================================
log "Backing up media files..."

MEDIA_BACKUP_FILE="$BACKUP_DIR/media/media_backup_$TIMESTAMP.tar.gz"
UPLOADS_DIR="/root/Facebook-X-Threads-Automation/uploads"

# Only archive when the directory exists and is non-empty
if [ -d "$UPLOADS_DIR" ] && [ "$(ls -A "$UPLOADS_DIR" 2>/dev/null)" ]; then
  # -C keeps the archive rooted at the uploads/ directory name rather
  # than embedding the full absolute path.
  if tar -czf "$MEDIA_BACKUP_FILE" -C "$(dirname "$UPLOADS_DIR")" "$(basename "$UPLOADS_DIR")"; then
    MEDIA_SIZE=$(du -h "$MEDIA_BACKUP_FILE" | cut -f1)
    success "Media backup completed: $MEDIA_BACKUP_FILE ($MEDIA_SIZE)"
  else
    warning "Media backup failed or partially completed"
  fi
else
  warning "No media files to backup"
fi

# ===========================================
# 3. CLEANUP OLD BACKUPS
# ===========================================
log "Cleaning up backups older than $RETENTION_DAYS days..."

# Count files before cleanup
DB_BEFORE=$(find "$BACKUP_DIR/database" -name "*.sql.gz" -type f 2>/dev/null | wc -l)
MEDIA_BEFORE=$(find "$BACKUP_DIR/media" -name "*.tar.gz" -type f 2>/dev/null | wc -l)

# Delete old files (best-effort; a vanished directory is not fatal)
find "$BACKUP_DIR/database" -name "*.sql.gz" -type f -mtime +"$RETENTION_DAYS" -delete 2>/dev/null || true
find "$BACKUP_DIR/media" -name "*.tar.gz" -type f -mtime +"$RETENTION_DAYS" -delete 2>/dev/null || true

# Count files after cleanup
DB_AFTER=$(find "$BACKUP_DIR/database" -name "*.sql.gz" -type f 2>/dev/null | wc -l)
MEDIA_AFTER=$(find "$BACKUP_DIR/media" -name "*.tar.gz" -type f 2>/dev/null | wc -l)

DB_DELETED=$((DB_BEFORE - DB_AFTER))
MEDIA_DELETED=$((MEDIA_BEFORE - MEDIA_AFTER))

if [ "$DB_DELETED" -gt 0 ] || [ "$MEDIA_DELETED" -gt 0 ]; then
  log "Deleted $DB_DELETED database backup(s) and $MEDIA_DELETED media backup(s)"
fi

# ===========================================
# 4. SUMMARY
# ===========================================
log "─────────────────────────────────────────"
log "Backup Summary:"
log " Database backups: $DB_AFTER"
log " Media backups: $MEDIA_AFTER"
log " Total size: $(du -sh "$BACKUP_DIR" | cut -f1)"
log "─────────────────────────────────────────"

success "Backup process completed successfully!"
121
scripts/maintenance/cleanup.sh
Executable file
121
scripts/maintenance/cleanup.sh
Executable file
@@ -0,0 +1,121 @@
|
||||
#!/bin/bash
# ===========================================
# Cleanup Script for Social Media Automation
#
# Truncates Docker container logs, removes old application logs,
# rotates nginx logs, prunes unused Docker resources, cleans Python
# caches and prints a disk-usage report. Run weekly via cron:
#   0 3 * * 0 /path/to/cleanup.sh >> /var/log/cleanup.log 2>&1
#
# Environment overrides:
#   PROJECT_DIR        - project root
#   LOG_RETENTION_DAYS - days of app logs to keep (default: 30)
# ===========================================

# -e: abort on error; -u: unset variables are errors.
set -eu

# Configuration
PROJECT_DIR="${PROJECT_DIR:-/root/Facebook-X-Threads-Automation}"
LOG_RETENTION_DAYS="${LOG_RETENTION_DAYS:-30}"
# NOTE(review): not referenced below — Docker log size limits belong in
# daemon.json / compose logging options; kept for env compatibility.
DOCKER_LOG_MAX_SIZE="${DOCKER_LOG_MAX_SIZE:-100m}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Print a timestamped message.
log() {
  echo -e "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

success() {
  log "${GREEN}$1${NC}"
}

warning() {
  log "${YELLOW}$1${NC}"
}

log "Starting cleanup process..."

# ===========================================
# 1. CLEAN DOCKER LOGS
# ===========================================
log "Cleaning Docker container logs..."

# Truncate json-file logs in place (requires root). Truncating rather
# than deleting keeps the file handle Docker holds open valid.
if [ -d /var/lib/docker/containers ]; then
  for container_dir in /var/lib/docker/containers/*/; do
    for f in "$container_dir"*-json.log; do
      if [ -f "$f" ]; then
        size_before=$(du -h "$f" | cut -f1)
        if truncate -s 0 "$f" 2>/dev/null; then
          log " Truncated: $(basename "$(dirname "$f")") ($size_before)"
        fi
      fi
    done
  done
  success "Docker logs cleaned"
else
  warning "Docker log directory not found (might need sudo)"
fi

# ===========================================
# 2. CLEAN APPLICATION LOGS
# ===========================================
log "Cleaning application logs older than $LOG_RETENTION_DAYS days..."

if [ -d "$PROJECT_DIR/logs" ]; then
  count=$(find "$PROJECT_DIR/logs" -name "*.log" -type f -mtime +"$LOG_RETENTION_DAYS" 2>/dev/null | wc -l)
  find "$PROJECT_DIR/logs" -name "*.log" -type f -mtime +"$LOG_RETENTION_DAYS" -delete 2>/dev/null || true
  log " Deleted $count old log file(s)"
fi

# ===========================================
# 3. CLEAN NGINX LOGS
# ===========================================
log "Rotating nginx logs..."

if docker ps --format '{{.Names}}' | grep -q "social-automation-nginx"; then
  # Explicit if/else: the original `a && ok || fail` form would also run
  # the failure branch if the success logger itself returned non-zero.
  if docker exec social-automation-nginx nginx -s reopen 2>/dev/null; then
    success "Nginx logs rotated"
  else
    warning "Could not rotate nginx logs"
  fi
fi

# ===========================================
# 4. CLEAN DOCKER SYSTEM
# ===========================================
log "Cleaning Docker system..."

# Remove stopped containers, unused networks and dangling images.
# NOTE: '--volumes' was deliberately removed — pruning volumes on a
# schedule can destroy the PostgreSQL/Redis data volumes if the stack
# happens to be stopped when the cron job runs.
if docker system prune -f 2>/dev/null; then
  success "Docker system cleaned"
else
  warning "Could not clean Docker system"
fi

# Remove any remaining dangling images
dangling=$(docker images -f "dangling=true" -q 2>/dev/null | wc -l)
if [ "$dangling" -gt 0 ]; then
  docker images -f "dangling=true" -q | xargs -r docker rmi 2>/dev/null || true
  log " Removed $dangling dangling image(s)"
fi

# ===========================================
# 5. CLEAN TEMP FILES
# ===========================================
log "Cleaning temporary files..."

# Python bytecode caches
find "$PROJECT_DIR" -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
find "$PROJECT_DIR" -type f -name "*.pyc" -delete 2>/dev/null || true
find "$PROJECT_DIR" -type f -name "*.pyo" -delete 2>/dev/null || true

# Pytest cache
rm -rf "$PROJECT_DIR/.pytest_cache" 2>/dev/null || true

success "Temporary files cleaned"

# ===========================================
# 6. DISK USAGE REPORT
# ===========================================
log "─────────────────────────────────────────"
log "Disk Usage Report:"
log " Project: $(du -sh "$PROJECT_DIR" 2>/dev/null | cut -f1)"
log " Backups: $(du -sh "$PROJECT_DIR/backups" 2>/dev/null | cut -f1 || echo 'N/A')"
log " Docker: $(docker system df --format '{{.Size}}' 2>/dev/null | head -1 || echo 'N/A')"
log " Disk: $(df -h / | awk 'NR==2 {print $4 " free of " $2}')"
log "─────────────────────────────────────────"

success "Cleanup process completed!"
154
scripts/maintenance/healthcheck.sh
Executable file
154
scripts/maintenance/healthcheck.sh
Executable file
@@ -0,0 +1,154 @@
|
||||
#!/bin/bash
# ===========================================
# Health Check Script for Social Media Automation
#
# Checks containers, API, database, Redis and host resources, then
# sends a single Telegram alert when something is down and a recovery
# message once everything is healthy again. Run every 5 minutes:
#   */5 * * * * /path/to/healthcheck.sh
#
# NOTE: intentionally no `set -e` — every check must still run after
# an earlier one fails; failures accumulate in ERRORS instead.
# ===========================================

# Configuration
APP_URL="${APP_URL:-http://localhost:8000}"
TELEGRAM_BOT_TOKEN="${TELEGRAM_BOT_TOKEN:-}"
TELEGRAM_CHAT_ID="${TELEGRAM_CHAT_ID:-}"
# Marker file: present while an alert is outstanding, so repeated cron
# runs do not spam Telegram; removed on recovery.
ALERT_FILE="/tmp/social_automation_alert_sent"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Print a timestamped message.
log() {
  echo -e "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Send a message via the Telegram Bot API. No-op when the bot token or
# chat id is not configured. --data-urlencode properly escapes spaces,
# newlines and HTML in the text; plain -d would send them raw inside an
# application/x-www-form-urlencoded body.
send_telegram() {
  local message="$1"
  if [ -n "$TELEGRAM_BOT_TOKEN" ] && [ -n "$TELEGRAM_CHAT_ID" ]; then
    curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
      -d "chat_id=${TELEGRAM_CHAT_ID}" \
      --data-urlencode "text=${message}" \
      -d "parse_mode=HTML" > /dev/null 2>&1
  fi
}

# check_service <display name> <container name>
# Prints a ✓/✗ line; returns 0 iff the container is currently running.
check_service() {
  local name=$1
  local container=$2

  if docker ps --format '{{.Names}}' | grep -q "^${container}$"; then
    echo -e "${GREEN}✓${NC} $name"
    return 0
  else
    echo -e "${RED}✗${NC} $name"
    return 1
  fi
}

log "Running health checks..."

ERRORS=0

# ===========================================
# 1. CHECK DOCKER CONTAINERS
# ===========================================
echo ""
echo "Container Status:"

check_service "App (FastAPI)" "social-automation-app" || ((ERRORS++))
check_service "Worker (Celery)" "social-automation-worker" || ((ERRORS++))
check_service "Beat (Scheduler)" "social-automation-beat" || ((ERRORS++))
check_service "Database (PostgreSQL)" "social-automation-db" || ((ERRORS++))
check_service "Redis" "social-automation-redis" || ((ERRORS++))
check_service "Nginx" "social-automation-nginx" || ((ERRORS++))

# ===========================================
# 2. CHECK API HEALTH
# ===========================================
echo ""
echo "API Status:"

# "000" stands in for "no HTTP response at all" (connection refused etc.)
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" "$APP_URL/api/health" 2>/dev/null || echo "000")

if [ "$HTTP_CODE" = "200" ]; then
  echo -e "${GREEN}✓${NC} API responding (HTTP $HTTP_CODE)"
else
  echo -e "${RED}✗${NC} API not responding (HTTP $HTTP_CODE)"
  ((ERRORS++))
fi

# ===========================================
# 3. CHECK DATABASE CONNECTION
# ===========================================
echo ""
echo "Database Status:"

if docker exec social-automation-db pg_isready -U social_user -d social_automation > /dev/null 2>&1; then
  echo -e "${GREEN}✓${NC} PostgreSQL accepting connections"
else
  echo -e "${RED}✗${NC} PostgreSQL not accepting connections"
  ((ERRORS++))
fi

# ===========================================
# 4. CHECK REDIS
# ===========================================
echo ""
echo "Redis Status:"

if docker exec social-automation-redis redis-cli ping 2>/dev/null | grep -q "PONG"; then
  echo -e "${GREEN}✓${NC} Redis responding"
else
  echo -e "${RED}✗${NC} Redis not responding"
  ((ERRORS++))
fi

# ===========================================
# 5. CHECK DISK SPACE
# ===========================================
echo ""
echo "System Resources:"

DISK_USAGE=$(df / | awk 'NR==2 {print $5}' | tr -d '%')
if [ "$DISK_USAGE" -lt 90 ]; then
  echo -e "${GREEN}✓${NC} Disk usage: ${DISK_USAGE}%"
else
  echo -e "${RED}✗${NC} Disk usage: ${DISK_USAGE}% (CRITICAL)"
  ((ERRORS++))
fi

# Memory (high usage only warns; it does not count as an error)
MEM_USAGE=$(free | awk 'NR==2 {printf "%.0f", $3/$2*100}')
if [ "$MEM_USAGE" -lt 90 ]; then
  echo -e "${GREEN}✓${NC} Memory usage: ${MEM_USAGE}%"
else
  echo -e "${YELLOW}!${NC} Memory usage: ${MEM_USAGE}% (HIGH)"
fi

# ===========================================
# 6. SUMMARY & ALERTS
# ===========================================
echo ""
echo "─────────────────────────────────────────"

if [ "$ERRORS" -eq 0 ]; then
  echo -e "${GREEN}All systems operational${NC}"

  # Clear alert marker if present (system recovered) and notify once
  if [ -f "$ALERT_FILE" ]; then
    rm "$ALERT_FILE"
    # Real newlines here; --data-urlencode in send_telegram encodes them
    send_telegram "✅ <b>Social Media Automation - RECOVERED</b>

All systems are back to normal."
  fi
else
  echo -e "${RED}$ERRORS error(s) detected${NC}"

  # Send alert only if not already sent
  if [ ! -f "$ALERT_FILE" ]; then
    touch "$ALERT_FILE"
    send_telegram "🚨 <b>Social Media Automation - ALERT</b>

$ERRORS service(s) are down!
Check server immediately."
  fi
fi

echo "─────────────────────────────────────────"

# Non-zero exit signals failure to cron / calling monitors
exit "$ERRORS"
94
scripts/maintenance/restore.sh
Executable file
94
scripts/maintenance/restore.sh
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/bin/bash
# ===========================================
# Restore Script for Social Media Automation
#
# Restores a gzipped pg_dump created by backup.sh into the running
# database container. With no argument, lists available backups.
# Usage: ./restore.sh [backup_file]
# ===========================================

# -e: abort on error; -u: unset variables are errors; -o pipefail: a
# gunzip failure in the restore pipeline must not be masked by psql's
# exit status.
set -euo pipefail

# Configuration
BACKUP_DIR="${BACKUP_DIR:-/root/Facebook-X-Threads-Automation/backups}"
CONTAINER_NAME="${CONTAINER_NAME:-social-automation-db}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Print a timestamped message.
log() {
  echo -e "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Log in red and abort the script.
error() {
  log "${RED}ERROR: $1${NC}"
  exit 1
}

success() {
  log "${GREEN}$1${NC}"
}

warning() {
  log "${YELLOW}$1${NC}"
}

# Check if a backup file was provided; if not, list what is available.
# ${1:-} instead of $1 so the test is safe under `set -u`.
if [ -z "${1:-}" ]; then
  log "Available database backups:"
  echo ""
  ls -lh "$BACKUP_DIR/database/"*.sql.gz 2>/dev/null || echo "  No backups found"
  echo ""
  log "Usage: $0 <backup_file.sql.gz>"
  exit 1
fi

BACKUP_FILE="$1"

# Accept either a path or a bare filename inside the backup directory
if [ ! -f "$BACKUP_FILE" ]; then
  if [ -f "$BACKUP_DIR/database/$BACKUP_FILE" ]; then
    BACKUP_FILE="$BACKUP_DIR/database/$BACKUP_FILE"
  else
    error "Backup file not found: $BACKUP_FILE"
  fi
fi

log "Backup file: $BACKUP_FILE"

# Confirm restore — this is destructive
warning "WARNING: This will overwrite the current database!"
read -r -p "Are you sure you want to continue? (yes/no): " CONFIRM

if [ "$CONFIRM" != "yes" ]; then
  log "Restore cancelled"
  exit 0
fi

# Check if container is running
if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
  error "Database container '$CONTAINER_NAME' is not running"
fi

# Get database credentials (fall back to project defaults)
POSTGRES_USER=$(docker exec "$CONTAINER_NAME" printenv POSTGRES_USER 2>/dev/null || echo "social_user")
POSTGRES_DB=$(docker exec "$CONTAINER_NAME" printenv POSTGRES_DB 2>/dev/null || echo "social_automation")

log "Restoring database..."

# Terminate other connections to the target database so the restore
# does not conflict with active sessions. Run against the 'postgres'
# maintenance DB. NOTE(review): the target database is NOT dropped or
# recreated — the dump is replayed into the existing one, so it should
# be empty or schema-compatible; confirm before relying on this.
docker exec "$CONTAINER_NAME" psql -U "$POSTGRES_USER" -c "
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '$POSTGRES_DB'
  AND pid <> pg_backend_pid();" postgres 2>/dev/null || true

# Restore. ON_ERROR_STOP makes psql exit non-zero on the first SQL
# error — without it psql returns 0 even when statements fail, and a
# broken restore would be reported as a success.
if gunzip -c "$BACKUP_FILE" | docker exec -i "$CONTAINER_NAME" psql -v ON_ERROR_STOP=1 -U "$POSTGRES_USER" "$POSTGRES_DB"; then
  success "Database restored successfully!"
else
  error "Database restore failed"
fi

log "Restore completed. Please restart the application containers."
Reference in New Issue
Block a user