Files
ATLAS/deploy/scripts/backup.sh
FlotillasGPS Developer 51d78bacf4 FlotillasGPS - Sistema completo de monitoreo de flotillas GPS
Sistema completo para monitoreo y gestion de flotas de vehiculos con:
- Backend FastAPI con PostgreSQL/TimescaleDB
- Frontend React con TypeScript y TailwindCSS
- App movil React Native con Expo
- Soporte para dispositivos GPS, Meshtastic y celulares
- Video streaming en vivo con MediaMTX
- Geocercas, alertas, viajes y reportes
- Autenticacion JWT y WebSockets en tiempo real

Documentacion completa y guias de usuario incluidas.
2026-01-21 08:18:00 +00:00

487 lines
13 KiB
Bash

#!/bin/bash
# ============================================
# Sistema de Flotillas - Backup Script
# ============================================
# Backs up the database and configuration files.
#
# Usage: ./backup.sh [--full] [--upload] [--keep-days N]
#
# Options:
#   --full       Also create a full file backup
#   --upload     Upload to S3/remote after the backup
#   --keep-days  Retention period in days (default: 7)
# ============================================
set -e
set -o pipefail

# ---------------------------------------------
# Colors (ANSI escapes used by the log helpers)
# ---------------------------------------------
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# ---------------------------------------------
# Configuration (overridable via environment)
# ---------------------------------------------
INSTALL_DIR="${INSTALL_DIR:-/opt/flotillas}"
BACKUP_DIR="${BACKUP_DIR:-/var/backups/flotillas}"
RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-7}"

# Load environment variables from the install's .env file.
# FIX: the previous 'export $(grep -v "^#" .env | xargs)' word-split and
# glob-expanded values, breaking any setting that contained spaces or
# quotes. 'set -a; source' exports each assignment verbatim.
if [[ -f "$INSTALL_DIR/.env" ]]; then
  set -a
  # shellcheck disable=SC1091 -- path only known at runtime
  source "$INSTALL_DIR/.env"
  set +a
fi

# Database connection settings
DB_HOST="${POSTGRES_HOST:-localhost}"
DB_PORT="${POSTGRES_PORT:-5432}"
DB_NAME="${POSTGRES_DB:-flotillas}"
DB_USER="${POSTGRES_USER:-flotillas}"
DB_PASSWORD="${POSTGRES_PASSWORD:-}"

# S3 upload settings (optional)
S3_ENABLED="${S3_ENABLED:-false}"
S3_BUCKET="${S3_BUCKET:-}"
S3_ENDPOINT="${S3_ENDPOINT:-https://s3.amazonaws.com}"

# Timestamp identifying this backup run; every artifact shares this prefix
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_NAME="flotillas_${TIMESTAMP}"

# CLI flags (set by parse_args)
FULL_BACKUP=false
UPLOAD_BACKUP=false
# ---------------------------------------------
# Funciones
# ---------------------------------------------
# Internal: emit one timestamped, colored log line.
# $1 = ANSI color, $2 = level tag, $3 = message
_log_line() {
  echo -e "${1}[$(date '+%Y-%m-%d %H:%M:%S')] [${2}]${NC} $3"
}

# Public logging helpers, one per severity level.
log_info()    { _log_line "$BLUE" "INFO" "$1"; }
log_success() { _log_line "$GREEN" "OK" "$1"; }
log_warn()    { _log_line "$YELLOW" "WARN" "$1"; }
log_error()   { _log_line "$RED" "ERROR" "$1"; }
# Print the human-readable size of a file (empty output if unreadable).
get_file_size() {
  local target="$1"
  du -h "$target" 2>/dev/null | cut -f1
}
# Verify the filesystem holding BACKUP_DIR has at least $1 GiB free.
# Globals:   BACKUP_DIR (read)
# Arguments: $1 - minimum free space required, in GiB
# Returns:   0 when enough space is available, 1 otherwise
check_disk_space() {
  local required_gb=$1
  local free_space
  # Split declaration from assignment: 'local v=$(cmd)' always exits 0 and
  # masked a df failure, which then compared an empty string as 0.
  free_space=$(df -BG "$BACKUP_DIR" | awk 'NR==2 {print $4}' | tr -d 'G') || free_space=""
  if [[ -z "$free_space" ]]; then
    log_error "No se pudo determinar el espacio disponible en $BACKUP_DIR"
    return 1
  fi
  if (( free_space < required_gb )); then
    log_error "Espacio insuficiente: ${free_space}GB disponible, se requieren ${required_gb}GB"
    return 1
  fi
  return 0
}
# ---------------------------------------------
# Argument parsing
# ---------------------------------------------
# Parse CLI flags into the FULL_BACKUP / UPLOAD_BACKUP / RETENTION_DAYS
# globals. Exits 1 on unknown options or invalid --keep-days values.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case $1 in
      --full)
        FULL_BACKUP=true
        shift
        ;;
      --upload)
        UPLOAD_BACKUP=true
        shift
        ;;
      --keep-days)
        # FIX: require a numeric value; previously a missing value left
        # RETENTION_DAYS empty or made 'shift 2' fail mid-parse.
        if [[ ! "${2:-}" =~ ^[0-9]+$ ]]; then
          log_error "Opcion --keep-days requiere un numero de dias"
          exit 1
        fi
        RETENTION_DAYS="$2"
        shift 2
        ;;
      --help|-h)
        echo "Uso: $0 [--full] [--upload] [--keep-days N]"
        echo ""
        echo "Opciones:"
        echo " --full Backup completo (DB + archivos)"
        echo " --upload Subir a S3 despues del backup"
        echo " --keep-days Dias de retencion (default: 7)"
        exit 0
        ;;
      *)
        log_error "Opcion desconocida: $1"
        exit 1
        ;;
    esac
  done
}
# ---------------------------------------------
# Backup directory preparation
# ---------------------------------------------
# Create the backup directory tree, then confirm it is writable and has
# at least 5 GB free before any backup step runs. Exits 1 on failure.
prepare_backup_dir() {
  log_info "Preparando directorio de backup..."

  # One call creates the whole tree (parents included)
  mkdir -p "$BACKUP_DIR" "$BACKUP_DIR/daily" "$BACKUP_DIR/temp"

  # Abort early when the target is not writable by this user
  if [[ ! -w "$BACKUP_DIR" ]]; then
    log_error "No hay permisos de escritura en $BACKUP_DIR"
    exit 1
  fi

  # Require a minimum of 5 GB free
  check_disk_space 5 || exit 1

  log_success "Directorio listo: $BACKUP_DIR"
}
# ---------------------------------------------
# PostgreSQL backup
# ---------------------------------------------
# Dump the main application database (and, when TRACCAR_DB_NAME is set,
# the Traccar database) with pg_dump, gzip each dump into
# $BACKUP_DIR/daily, and remove the intermediate plain-SQL file.
# Globals read: BACKUP_DIR, BACKUP_NAME, DB_HOST, DB_PORT, DB_USER,
#               DB_NAME, DB_PASSWORD, TRACCAR_DB_NAME (optional)
backup_database() {
log_info "Iniciando backup de PostgreSQL..."
local db_backup_file="$BACKUP_DIR/temp/${BACKUP_NAME}_db.sql"
local db_backup_compressed="$BACKUP_DIR/daily/${BACKUP_NAME}_db.sql.gz"
# Pass the password via the environment so it does not appear in `ps`
export PGPASSWORD="$DB_PASSWORD"
# Plain-text dump without ownership or ACLs so it restores into any role
log_info "Exportando base de datos ${DB_NAME}..."
# NOTE(review): stderr is discarded, so the --verbose output is lost and
# a failing pg_dump only surfaces via set -e / the ERR trap
pg_dump \
-h "$DB_HOST" \
-p "$DB_PORT" \
-U "$DB_USER" \
-d "$DB_NAME" \
--format=plain \
--no-owner \
--no-acl \
--verbose \
-f "$db_backup_file" 2>/dev/null
# Compress at maximum ratio into the daily directory
log_info "Comprimiendo backup..."
gzip -9 -c "$db_backup_file" > "$db_backup_compressed"
# Drop the uncompressed temp dump
rm -f "$db_backup_file"
# Do not leave the DB password in the environment
unset PGPASSWORD
local size=$(get_file_size "$db_backup_compressed")
log_success "Backup de BD completado: $db_backup_compressed ($size)"
# Also dump the Traccar database when one is configured
if [[ -n "${TRACCAR_DB_NAME:-}" ]]; then
log_info "Exportando base de datos Traccar..."
local traccar_backup="$BACKUP_DIR/daily/${BACKUP_NAME}_traccar.sql.gz"
export PGPASSWORD="$DB_PASSWORD"
# Streamed straight into gzip; no intermediate file this time
pg_dump \
-h "$DB_HOST" \
-p "$DB_PORT" \
-U "$DB_USER" \
-d "${TRACCAR_DB_NAME}" \
--format=plain \
--no-owner \
--no-acl \
2>/dev/null | gzip -9 > "$traccar_backup"
unset PGPASSWORD
local traccar_size=$(get_file_size "$traccar_backup")
log_success "Backup de Traccar completado: $traccar_backup ($traccar_size)"
fi
}
# ---------------------------------------------
# Configuration backup
# ---------------------------------------------
# Archive the .env file, deploy scripts and service configuration into a
# single tarball under $BACKUP_DIR/daily. Missing paths are skipped.
backup_config() {
  log_info "Respaldando archivos de configuracion..."
  local config_backup="$BACKUP_DIR/daily/${BACKUP_NAME}_config.tar.gz"

  # Paths to include; entries may be glob patterns
  local files_to_backup=(
    "$INSTALL_DIR/.env"
    "$INSTALL_DIR/deploy"
    "/opt/traccar/conf/traccar.xml"
    "/opt/mediamtx/mediamtx.yml"
    "/etc/mosquitto/conf.d/flotillas.conf"
    "/etc/systemd/system/flotillas-*.service"
    "/etc/systemd/system/mediamtx.service"
  )

  # Expand each entry to existing paths.
  # BUG FIX: the old code ran [[ -e "$file" ]] on the quoted entry, so the
  # glob 'flotillas-*.service' was tested as a literal filename, never
  # matched, and the systemd units were silently left out of the backup.
  # compgen -G expands a glob to its matches and prints a literal path
  # only when it exists.
  local file_list entry
  file_list=$(mktemp)
  for entry in "${files_to_backup[@]}"; do
    compgen -G "$entry" >> "$file_list" || true
  done

  # Build the tarball; '|| true' keeps one unreadable member from
  # aborting the whole backup
  tar -czf "$config_backup" -T "$file_list" 2>/dev/null || true

  rm -f "$file_list"

  local size=$(get_file_size "$config_backup")
  log_success "Backup de configuracion completado: $config_backup ($size)"
}
# ---------------------------------------------
# Full file backup
# ---------------------------------------------
# When --full was requested, archive the whole install directory while
# leaving out virtualenvs, node_modules, VCS data, logs and bytecode.
backup_full() {
  # No-op unless the --full flag was given
  [[ "$FULL_BACKUP" == "true" ]] || return 0

  log_info "Iniciando backup completo de archivos..."
  local archive="$BACKUP_DIR/daily/${BACKUP_NAME}_full.tar.gz"

  # Large regenerable directories and transient files are skipped
  local excludes=(
    --exclude="$INSTALL_DIR/backend/venv"
    --exclude="$INSTALL_DIR/frontend/node_modules"
    --exclude="$INSTALL_DIR/.git"
    --exclude="*.log"
    --exclude="*.pyc"
    --exclude="__pycache__"
  )
  tar -czf "$archive" "${excludes[@]}" "$INSTALL_DIR" 2>/dev/null

  local archive_size=$(get_file_size "$archive")
  log_success "Backup completo: $archive ($archive_size)"
}
# ---------------------------------------------
# Backup rotation
# ---------------------------------------------
# Delete daily backups older than RETENTION_DAYS days and report the
# space still used by $BACKUP_DIR.
rotate_backups() {
  log_info "Rotando backups antiguos (retencion: ${RETENTION_DAYS} dias)..."
  local deleted=0

  # NUL-delimited find/read loop: safe for any filename.
  # BUG FIX: '((deleted++))' evaluates to 0 on the first increment
  # (post-increment returns the old value), i.e. exit status 1, which
  # killed the whole script under 'set -e' as soon as one old backup was
  # removed. Plain arithmetic assignment always has status 0.
  while IFS= read -r -d '' file; do
    rm -f -- "$file"
    deleted=$((deleted + 1))
  done < <(find "$BACKUP_DIR/daily" -type f -name "flotillas_*.gz" -mtime +"${RETENTION_DAYS}" -print0 2>/dev/null)

  if [[ $deleted -gt 0 ]]; then
    log_info "Eliminados $deleted backups antiguos"
  fi

  # Report total space consumed by all retained backups
  local space_used=$(du -sh "$BACKUP_DIR" 2>/dev/null | cut -f1)
  log_info "Espacio total usado por backups: $space_used"
}
# ---------------------------------------------
# S3 upload
# ---------------------------------------------
# Upload this run's backup files to s3://$S3_BUCKET/backups/YYYY/MM/.
# No-op unless --upload was given AND S3_ENABLED=true.
upload_to_s3() {
  if [[ "$UPLOAD_BACKUP" != "true" ]]; then
    return
  fi
  if [[ "$S3_ENABLED" != "true" ]]; then
    log_warn "S3 no esta habilitado. Configura S3_ENABLED=true en .env"
    return
  fi
  # Guard: an empty bucket would produce an invalid s3:// destination
  if [[ -z "$S3_BUCKET" ]]; then
    log_warn "S3_BUCKET no esta configurado"
    return
  fi
  if ! command -v aws &> /dev/null; then
    log_warn "AWS CLI no instalado. Instalando..."
    pip3 install awscli -q
  fi

  log_info "Subiendo backups a S3..."

  # Credentials via environment, not argv, so they never show up in `ps`
  export AWS_ACCESS_KEY_ID="${S3_ACCESS_KEY}"
  export AWS_SECRET_ACCESS_KEY="${S3_SECRET_KEY}"

  # Upload every file produced by this run into a YYYY/MM prefix
  for file in "$BACKUP_DIR/daily/${BACKUP_NAME}"*.gz; do
    if [[ -f "$file" ]]; then
      local filename=$(basename "$file")
      log_info "Subiendo: $filename"
      # BUG FIX: the destination key used "$(unknown)" — a nonexistent
      # command substitution — so every object was uploaded to the same
      # empty key name; use the actual $filename.
      aws s3 cp "$file" "s3://${S3_BUCKET}/backups/$(date +%Y/%m)/${filename}" \
        --endpoint-url "$S3_ENDPOINT" \
        --quiet
      log_success "Subido: $filename"
    fi
  done

  # Scrub credentials from the environment afterwards
  unset AWS_ACCESS_KEY_ID
  unset AWS_SECRET_ACCESS_KEY

  log_success "Backup subido a S3"
}
# ---------------------------------------------
# Backup index
# ---------------------------------------------
# Regenerate $BACKUP_DIR/backup_index.txt listing every retained backup
# (newest first) with its date, type and size.
create_backup_index() {
  log_info "Actualizando indice de backups..."
  local index_file="$BACKUP_DIR/backup_index.txt"

  # Header (heredoc body intentionally flush-left: it is written verbatim)
  cat > "$index_file" <<EOF
# Indice de Backups - Sistema de Flotillas
# Generado: $(date)
# Retencion: ${RETENTION_DAYS} dias
#
# Formato: fecha | tipo | archivo | tamanio
#
EOF

  # List backups newest-first.
  # BUG FIX: 'for file in $(ls -t ...)' parses ls output and word-splits,
  # so any filename containing whitespace corrupted the index. Sort by
  # mtime with find instead and read one full path per line.
  local file filename size date type
  while IFS= read -r file; do
    filename=$(basename "$file")
    size=$(get_file_size "$file")
    date=$(stat -c %y "$file" 2>/dev/null | cut -d' ' -f1)
    type="unknown"
    case "$filename" in
      *_db.sql.gz) type="database" ;;
      *_config.tar.gz) type="config" ;;
      *_full.tar.gz) type="full" ;;
      *_traccar.sql.gz) type="traccar" ;;
    esac
    echo "$date | $type | $filename | $size" >> "$index_file"
  done < <(find "$BACKUP_DIR/daily" -maxdepth 1 -type f -name '*.gz' -printf '%T@\t%p\n' 2>/dev/null | sort -rn | cut -f2-)

  log_success "Indice actualizado: $index_file"
}
# ---------------------------------------------
# Backup integrity check
# ---------------------------------------------
# Run 'gzip -t' on every archive produced by this run.
# Returns 0 when all archives are intact, 1 if any is corrupt.
verify_backup() {
  log_info "Verificando integridad de backups..."
  local errors=0
  local file

  for file in "$BACKUP_DIR/daily/${BACKUP_NAME}"*.gz; do
    if [[ -f "$file" ]]; then
      if gzip -t "$file" 2>/dev/null; then
        log_success "OK: $(basename "$file")"
      else
        log_error "CORRUPTO: $(basename "$file")"
        # BUG FIX: '((errors++))' returns status 1 on the first increment
        # and would abort under 'set -e' whenever this function is called
        # outside an '||' context; plain assignment is always status 0.
        errors=$((errors + 1))
      fi
    fi
  done

  if [[ $errors -gt 0 ]]; then
    log_error "Se encontraron $errors archivos corruptos"
    return 1
  fi
  return 0
}
# ---------------------------------------------
# Notifications
# ---------------------------------------------
# Send a Telegram message about the backup result. Silently does nothing
# when TELEGRAM_BOT_TOKEN / TELEGRAM_CHAT_ID are not configured.
# $1 = status ("error" selects the failure emoji), $2 = message text
send_notification() {
  local status="$1"
  local message="$2"

  # Telegram (only when both settings are present)
  if [[ -n "${TELEGRAM_BOT_TOKEN:-}" ]] && [[ -n "${TELEGRAM_CHAT_ID:-}" ]]; then
    local emoji="✅"
    [[ "$status" == "error" ]] && emoji="❌"
    # FIX: '|| true' — notification delivery is best-effort; previously a
    # transient curl failure propagated, tripped set -e / the ERR trap
    # and flagged an otherwise successful backup as failed.
    curl -s -X POST \
      "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
      -d chat_id="${TELEGRAM_CHAT_ID}" \
      -d text="${emoji} Backup Flotillas: ${message}" \
      > /dev/null 2>&1 || true
  fi
}
# ---------------------------------------------
# Summary
# ---------------------------------------------
# Print a human-readable recap of this run: every artifact produced,
# where it lives, and how long it will be retained.
show_summary() {
  echo ""
  log_success "=========================================="
  log_success "BACKUP COMPLETADO"
  log_success "=========================================="
  echo ""
  echo "Archivos creados:"
  local artifact
  for artifact in "$BACKUP_DIR/daily/${BACKUP_NAME}"*.gz; do
    [[ -f "$artifact" ]] && echo " - $(basename "$artifact") ($(get_file_size "$artifact"))"
  done
  echo ""
  echo "Ubicacion: $BACKUP_DIR/daily/"
  echo "Retencion: ${RETENTION_DAYS} dias"
  echo ""
}
# ---------------------------------------------
# Main
# ---------------------------------------------
# Orchestrate the full backup run: parse flags, prepare the target
# directory, dump DB + config (+ optional full archive), rotate old
# backups, verify, optionally upload, regenerate the index, and print
# a summary.
main() {
parse_args "$@"
log_info "=========================================="
log_info "INICIANDO BACKUP - $(date)"
log_info "=========================================="
# Run each backup stage in order
prepare_backup_dir
backup_database
backup_config
backup_full
rotate_backups
# Verification failures are logged but do not abort the run
verify_backup || true
upload_to_s3
create_backup_index
show_summary
# Notify success (no-op unless Telegram is configured)
send_notification "success" "Backup completado exitosamente"
}
# Global error handling: log, notify and abort on any unhandled failure.
# FIX: without 'set -E' (errtrace) the ERR trap is NOT inherited by
# functions, so a failure inside any function above exited via 'set -e'
# without ever firing this trap — no failure notification was sent.
set -E
trap 'log_error "Backup fallido en linea $LINENO"; send_notification "error" "Backup fallido"; exit 1' ERR
# Run
main "$@"