Files
motovaultpro/scripts/refresh-staging-db.sh
Eric Gullickson ab682da1f1 docs: update SSH setup instructions in refresh-staging-db.sh
Add detailed step-by-step instructions for setting up SSH key-based
authentication from staging to production, including proper directory
and file permissions (0700 for .ssh, 0600 for authorized_keys).

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-01 15:43:55 -06:00

356 lines
12 KiB
Bash
Executable File

#!/bin/bash
# Staging Database Refresh Script for MotoVaultPro
# Copies production database to staging (non-interactive)
# Usage: ./scripts/refresh-staging-db.sh [options]
#
# Prerequisites:
#   SSH key-based access from staging act_runner to production act_runner.
#
#   On STAGING (as act_runner):
#     ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519
#     cat ~/.ssh/id_ed25519.pub  # Copy this output
#
#   On PRODUCTION (as root or sudo):
#     sudo -u act_runner mkdir -p ~/.ssh
#     sudo chmod 700 /home/act_runner/.ssh
#     sudo -u act_runner touch ~/.ssh/authorized_keys
#     sudo chmod 600 /home/act_runner/.ssh/authorized_keys
#     echo "PASTE_PUBLIC_KEY_HERE" | sudo tee -a /home/act_runner/.ssh/authorized_keys
#
#   Verify from STAGING:
#     ssh act_runner@172.30.1.36 echo "SSH OK"

# Abort on the first failed command (-e) and on any reference to an unset
# variable (-u catches variable-name typos before they can do damage, e.g.
# an rm against an accidentally-empty path).
set -eu

# ANSI color codes for terminal output (expanded by `echo -e`/`printf %b`).
# Never reassigned, so marked readonly.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------

# Absolute directory containing this script.
# NOTE(review): SCRIPT_DIR is not referenced anywhere in this script's visible
# body — confirm it is needed (e.g. by sourced tooling) before removing.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Per-run timestamp; gives backup/dump files unique names.
TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"

# Server configuration
PRODUCTION_HOST='172.30.1.36'
PRODUCTION_USER='act_runner'
PRODUCTION_CONTAINER='mvp-postgres'
STAGING_CONTAINER='mvp-postgres-staging'
STAGING_BACKEND_CONTAINER='mvp-backend-staging'

# Database configuration
DATABASE_NAME='motovaultpro'
DATABASE_USER='postgres'

# Working area on the staging host for dumps and safety backups.
BACKUP_DIR='/tmp/mvp-db-refresh'
STAGING_BACKUP="${BACKUP_DIR}/staging_backup_${TIMESTAMP}.sql.gz"
PRODUCTION_DUMP="${BACKUP_DIR}/production_dump_${TIMESTAMP}.sql"

# Runtime flags; overridden by command-line options during argument parsing.
DRY_RUN=false
SKIP_BACKUP=false
KEEP_DUMP=false
# ---------------------------------------------------------------------------
# Logging helpers — all write a single "<color>[TAG]<reset> message" line.
# ---------------------------------------------------------------------------

# Internal: $1 = color escape, $2 = tag text, $3 = message.
# printf '%b' expands the backslash escapes in the color codes exactly as
# `echo -e` does.
_print_tagged() {
  printf '%b\n' "${1}[${2}]${NC} ${3}"
}

print_info() {
  _print_tagged "$GREEN" "INFO" "$1"
}

print_warn() {
  _print_tagged "$YELLOW" "WARN" "$1"
}

print_error() {
  _print_tagged "$RED" "ERROR" "$1"
}

print_step() {
  _print_tagged "$BLUE" "STEP" "$1"
}

print_dry() {
  _print_tagged "$YELLOW" "DRY-RUN" "Would: $1"
}
# Print usage/help text and exit.
# $1 - optional exit status, defaults to 0. Previously this function always
#      exited 0, so even the unknown-option error path reported success to
#      callers (CI, cron); error paths should now pass a non-zero status.
show_usage() {
cat << EOF
Staging Database Refresh Script for MotoVaultPro
Copies the production database to staging. This script must be run on the
staging server and requires SSH access to the production server.
Usage: $0 [options]
Options:
-h, --help Show this help message
--dry-run Show what would happen without making changes
--skip-backup Skip staging backup (faster, less safe)
--keep-dump Keep the production dump file after import
Prerequisites:
SSH key-based access from staging to production.
# On STAGING (as act_runner):
ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519
cat ~/.ssh/id_ed25519.pub # Copy this output
# On PRODUCTION (as root or sudo):
sudo -u act_runner mkdir -p ~/.ssh
sudo chmod 700 /home/act_runner/.ssh
sudo -u act_runner touch ~/.ssh/authorized_keys
sudo chmod 600 /home/act_runner/.ssh/authorized_keys
echo "PASTE_KEY" | sudo tee -a /home/act_runner/.ssh/authorized_keys
# Verify from STAGING:
ssh ${PRODUCTION_USER}@${PRODUCTION_HOST} echo "SSH OK"
Examples:
# Preview what would happen
$0 --dry-run
# Full refresh (recommended)
$0
# Quick refresh without staging backup
$0 --skip-backup
EOF
exit "${1:-0}"
}
# Parse command-line options into the global DRY_RUN / SKIP_BACKUP / KEEP_DUMP
# flags. -h/--help exits 0 via show_usage; an unknown option now requests a
# non-zero exit (status 1) from show_usage instead of the previous success
# exit, so automation can detect a bad invocation.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -h|--help)
        show_usage
        ;;
      --dry-run)
        DRY_RUN=true
        shift
        ;;
      --skip-backup)
        SKIP_BACKUP=true
        shift
        ;;
      --keep-dump)
        KEEP_DUMP=true
        shift
        ;;
      *)
        print_error "Unknown option: $1"
        show_usage 1
        ;;
    esac
  done
}

parse_args "$@"
# Runs on every exit path (registered via `trap ... EXIT` below).
# * Deletes the local production dump unless --keep-dump was requested.
# * On a non-zero exit, points the operator at the staging backup (if one
#   was taken) and shows the one-liner to restore it.
# Re-raises the original exit status so callers see the real outcome.
cleanup() {
  local rc=$?
  if [[ "$KEEP_DUMP" == false && -f "$PRODUCTION_DUMP" ]]; then
    rm -f "$PRODUCTION_DUMP"
  fi
  if [[ "$rc" -ne 0 ]]; then
    print_error "Script failed with exit code $rc"
    if [[ -f "$STAGING_BACKUP" ]]; then
      print_warn "Staging backup available at: $STAGING_BACKUP"
      print_warn "To restore: gunzip -c $STAGING_BACKUP | docker exec -i $STAGING_CONTAINER psql -U $DATABASE_USER -d $DATABASE_NAME"
    fi
  fi
  exit "$rc"
}
trap cleanup EXIT
# ---------------------------------------------------------------------------
# Banner
# ---------------------------------------------------------------------------
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE} MotoVaultPro Staging Database Refresh${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
# Announce dry-run mode up front so the operator knows nothing will change.
if [ "$DRY_RUN" = true ]; then
print_warn "DRY RUN MODE - No changes will be made"
echo ""
fi
# Step 1: Validate prerequisites (SSH reachability, both PostgreSQL
# containers running) before touching anything.
print_step "1/8 Validating prerequisites..."
# Check SSH to production. BatchMode=yes makes ssh fail instead of prompting
# for a password when key auth is missing; ConnectTimeout keeps this fast.
print_info "Testing SSH connection to production..."
if [ "$DRY_RUN" = true ]; then
print_dry "ssh ${PRODUCTION_USER}@${PRODUCTION_HOST} echo 'OK'"
else
if ! ssh -o BatchMode=yes -o ConnectTimeout=5 "${PRODUCTION_USER}@${PRODUCTION_HOST}" "echo 'OK'" > /dev/null 2>&1; then
print_error "Cannot SSH to production server (${PRODUCTION_USER}@${PRODUCTION_HOST})"
print_error "Ensure SSH key is set up. See script header for full instructions."
print_error "Quick check: Does ~/.ssh/id_ed25519 exist? Is your public key in production's authorized_keys?"
exit 1
fi
print_info "SSH connection OK"
fi
# Check the production PostgreSQL container (remotely, over SSH). The
# anchored grep (^name$) gives an exact container-name match.
print_info "Checking production PostgreSQL container..."
if [ "$DRY_RUN" = true ]; then
print_dry "ssh ${PRODUCTION_USER}@${PRODUCTION_HOST} docker ps --filter name=${PRODUCTION_CONTAINER}"
else
if ! ssh "${PRODUCTION_USER}@${PRODUCTION_HOST}" "docker ps --format '{{.Names}}' | grep -q '^${PRODUCTION_CONTAINER}$'"; then
print_error "Production PostgreSQL container '${PRODUCTION_CONTAINER}' is not running"
exit 1
fi
print_info "Production container OK"
fi
# Check the staging PostgreSQL container on this host the same way.
print_info "Checking staging PostgreSQL container..."
if [ "$DRY_RUN" = true ]; then
print_dry "docker ps --filter name=${STAGING_CONTAINER}"
else
if ! docker ps --format '{{.Names}}' | grep -q "^${STAGING_CONTAINER}$"; then
print_error "Staging PostgreSQL container '${STAGING_CONTAINER}' is not running"
exit 1
fi
print_info "Staging container OK"
fi
# Create the working directory for dumps/backups. Skipped in dry-run so a
# preview leaves no trace on disk.
if [ "$DRY_RUN" = false ]; then
mkdir -p "$BACKUP_DIR"
fi
# Step 2: Back up the current staging database so a failed refresh can be
# rolled back (cleanup() advertises this file on failure).
print_step "2/8 Backing up staging database..."
if [ "$SKIP_BACKUP" = true ]; then
    print_warn "Skipping staging backup (--skip-backup)"
elif [ "$DRY_RUN" = true ]; then
    print_dry "docker exec ${STAGING_CONTAINER} pg_dump -U ${DATABASE_USER} -d ${DATABASE_NAME} | gzip > ${STAGING_BACKUP}"
else
    # The pipeline's overall exit status is gzip's, so without an explicit
    # check a failed pg_dump would silently leave an empty/truncated backup
    # behind while the script carried on. Capture pg_dump's own status via
    # PIPESTATUS (set +e/-e so the grep-less pipeline cannot abort early).
    set +e
    docker exec "$STAGING_CONTAINER" pg_dump -U "$DATABASE_USER" -d "$DATABASE_NAME" 2>/dev/null | gzip > "$STAGING_BACKUP"
    backup_status=${PIPESTATUS[0]}
    set -e
    if [ "$backup_status" -ne 0 ]; then
        print_error "pg_dump of the staging database failed (exit $backup_status)"
        # Remove the corrupt/empty file so cleanup() does not advertise it.
        rm -f "$STAGING_BACKUP"
        exit 1
    fi
    BACKUP_SIZE=$(du -h "$STAGING_BACKUP" | cut -f1)
    print_info "Staging backup created: $STAGING_BACKUP ($BACKUP_SIZE)"
fi
# Step 3: Export the production database over SSH into a local dump file.
print_step "3/8 Exporting production database..."
if [ "$DRY_RUN" = true ]; then
    print_dry "ssh ${PRODUCTION_USER}@${PRODUCTION_HOST} docker exec ${PRODUCTION_CONTAINER} pg_dump -U ${DATABASE_USER} -d ${DATABASE_NAME} > ${PRODUCTION_DUMP}"
else
    print_info "Streaming production database (this may take a while)..."
    # ssh returns the remote command's exit status, so a failed remote
    # pg_dump aborts the script here via set -e.
    ssh "${PRODUCTION_USER}@${PRODUCTION_HOST}" "docker exec ${PRODUCTION_CONTAINER} pg_dump -U ${DATABASE_USER} -d ${DATABASE_NAME}" > "$PRODUCTION_DUMP"
    # Extra guard: refuse a zero-byte dump (e.g. remote tooling wrote only
    # to stderr) — importing it would leave staging empty.
    if [ ! -s "$PRODUCTION_DUMP" ]; then
        print_error "Production dump is empty: $PRODUCTION_DUMP"
        exit 1
    fi
    DUMP_SIZE=$(du -h "$PRODUCTION_DUMP" | cut -f1)
    print_info "Production dump received: $PRODUCTION_DUMP ($DUMP_SIZE)"
fi
# Step 4: Stop the staging backend before the database is replaced (step 5
# drops the database it is connected to).
print_step "4/8 Stopping staging backend..."
if [ "$DRY_RUN" = true ]; then
    print_dry "docker stop ${STAGING_BACKEND_CONTAINER}"
elif docker ps --format '{{.Names}}' | grep -q "^${STAGING_BACKEND_CONTAINER}$"; then
    docker stop "$STAGING_BACKEND_CONTAINER" > /dev/null
    print_info "Staging backend stopped"
else
    print_warn "Staging backend not running, skipping stop"
fi
# Step 5: Drop and recreate staging database so the import starts from a
# clean slate.
print_step "5/8 Recreating staging database..."
if [ "$DRY_RUN" = true ]; then
print_dry "docker exec ${STAGING_CONTAINER} psql -U ${DATABASE_USER} -c 'DROP DATABASE IF EXISTS ${DATABASE_NAME}'"
print_dry "docker exec ${STAGING_CONTAINER} psql -U ${DATABASE_USER} -c 'CREATE DATABASE ${DATABASE_NAME}'"
else
# Terminate any open connections first — DROP DATABASE fails while other
# sessions are connected. Errors are deliberately ignored (|| true): there
# may be no connections, or the database may not exist yet.
docker exec "$STAGING_CONTAINER" psql -U "$DATABASE_USER" -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${DATABASE_NAME}' AND pid <> pg_backend_pid();" > /dev/null 2>&1 || true
# Drop and recreate. Note: no -d flag, so psql does not connect to the
# database being dropped. Failures here abort the script via set -e.
docker exec "$STAGING_CONTAINER" psql -U "$DATABASE_USER" -c "DROP DATABASE IF EXISTS ${DATABASE_NAME}" > /dev/null
docker exec "$STAGING_CONTAINER" psql -U "$DATABASE_USER" -c "CREATE DATABASE ${DATABASE_NAME}" > /dev/null
print_info "Staging database recreated"
fi
# Step 6: Import the production dump into the freshly created staging DB.
print_step "6/8 Importing production data..."
if [ "$DRY_RUN" = true ]; then
    print_dry "docker exec -i ${STAGING_CONTAINER} psql -U ${DATABASE_USER} -d ${DATABASE_NAME} < ${PRODUCTION_DUMP}"
else
    print_info "Importing data (this may take a while)..."
    # Filter the expected "role does not exist" noise from the output, but
    # do not let the filter mask real failures: the previous
    # `... | grep -v ... || true` swallowed every error, including a psql
    # that could not connect at all. Capture psql's own exit status via
    # PIPESTATUS (set +e/-e: grep legitimately exits 1 when it emits no
    # lines, which must not abort the script).
    set +e
    docker exec -i "$STAGING_CONTAINER" psql -U "$DATABASE_USER" -d "$DATABASE_NAME" < "$PRODUCTION_DUMP" 2>&1 | grep -v "^ERROR: role"
    import_status=${PIPESTATUS[0]}
    set -e
    if [ "$import_status" -ne 0 ]; then
        print_error "psql import failed (exit $import_status)"
        exit 1
    fi
    print_info "Import complete"
fi
# Step 7: Restart staging backend and wait for it to come up.
print_step "7/8 Restarting staging backend..."
if [ "$DRY_RUN" = true ]; then
    print_dry "docker start ${STAGING_BACKEND_CONTAINER}"
else
    # Best-effort start: tolerate errors (e.g. container already running).
    docker start "$STAGING_BACKEND_CONTAINER" > /dev/null 2>&1 || true
    print_info "Staging backend started"
    # Poll the health endpoint from inside the container, up to 30 attempts
    # 2s apart (~60s). Previously the loop slept once more after a
    # successful check's break was skipped on attempt 30 and after the
    # timeout warning; now it stops immediately in both cases.
    print_info "Waiting for backend to be ready..."
    for i in {1..30}; do
        if docker exec "$STAGING_BACKEND_CONTAINER" curl -sf http://localhost:3000/api/health > /dev/null 2>&1; then
            print_info "Backend is healthy"
            break
        fi
        if [ "$i" -eq 30 ]; then
            print_warn "Backend health check timed out (may still be starting)"
            break
        fi
        sleep 2
    done
fi
# Step 8: Verify the refresh and clean up the local dump file.
print_step "8/8 Verifying refresh..."
if [ "$DRY_RUN" = true ]; then
print_dry "docker exec ${STAGING_CONTAINER} psql -U ${DATABASE_USER} -d ${DATABASE_NAME} -c 'SELECT COUNT(*) FROM information_schema.tables'"
else
# Count public tables as a sanity check. -tAc = tuples only, unaligned, so
# the output is a bare number suitable for the summary banner below.
TABLE_COUNT=$(docker exec "$STAGING_CONTAINER" psql -U "$DATABASE_USER" -d "$DATABASE_NAME" -tAc "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public'")
# Show the 10 largest user tables by row count. Failures are non-fatal —
# this listing is informational only.
# NOTE(review): n_live_tup is a statistics estimate; it may lag right
# after a bulk import.
print_info "Database statistics:"
echo ""
docker exec "$STAGING_CONTAINER" psql -U "$DATABASE_USER" -d "$DATABASE_NAME" -c "
SELECT
schemaname || '.' || relname AS table,
n_live_tup AS rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC
LIMIT 10;
" 2>/dev/null || true
echo ""
# Remove the local production dump unless the operator asked to keep it
# (cleanup() repeats this on exit as a safety net).
if [ "$KEEP_DUMP" = false ]; then
rm -f "$PRODUCTION_DUMP"
print_info "Production dump cleaned up"
else
print_info "Production dump kept at: $PRODUCTION_DUMP"
fi
fi
# ---------------------------------------------------------------------------
# Summary banner
# ---------------------------------------------------------------------------
echo ""
printf '%b\n' "${GREEN}========================================${NC}"
printf '%b\n' "${GREEN} Staging Database Refresh Complete!${NC}"
printf '%b\n' "${GREEN}========================================${NC}"
echo ""
if [ "$DRY_RUN" = true ]; then
  print_warn "This was a dry run. No changes were made."
else
  print_info "Production data has been copied to staging"
  print_info "Tables: $TABLE_COUNT"
  if [ "$SKIP_BACKUP" = false ] && [ -f "$STAGING_BACKUP" ]; then
    print_info "Previous staging backup: $STAGING_BACKUP"
  fi
fi
echo ""