Database Automation
Database migration scripts, automated backups, performance monitoring, and query optimization
PostgreSQL Automated Backup Script
#!/bin/bash
# Automated PostgreSQL backup with S3 upload
set -euo pipefail

DB_NAME="production_db"
DB_USER="postgres"
BACKUP_DIR="/var/backups/postgres"
S3_BUCKET="s3://my-backups/postgres"
RETENTION_DAYS=30

# Create timestamped filename
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="$BACKUP_DIR/${DB_NAME}_${TIMESTAMP}.sql.gz"

# Dump and compress (assumes credentials come from ~/.pgpass or peer authentication)
mkdir -p "$BACKUP_DIR"
pg_dump -U "$DB_USER" "$DB_NAME" | gzip > "$BACKUP_FILE"
# Upload to S3
aws s3 cp "$BACKUP_FILE" "$S3_BUCKET/"

# Remove local backups older than the retention window
find "$BACKUP_DIR" -name "*.sql.gz" -mtime +"$RETENTION_DAYS" -delete

# Remove expired S3 backups (date -d is GNU date; on macOS use gdate)
olderThan=$(date -d "$RETENTION_DAYS days ago" +%s)
aws s3 ls "$S3_BUCKET/" | while read -r line; do
    createDate=$(echo "$line" | awk '{print $1" "$2}')
    fileName=$(echo "$line" | awk '{print $4}')
    if [[ $(date -d "$createDate" +%s) -lt $olderThan ]]; then
        aws s3 rm "$S3_BUCKET/$fileName"
    fi
done
echo "Backup completed: $BACKUP_FILE"Database Migration with Prisma

Database Migration with Prisma

// prisma/schema.prisma
generator client {
  provider = "prisma-client-js"
}

datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

model User {
  id        Int      @id @default(autoincrement())
  email     String   @unique
  name      String?
  posts     Post[]
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt
}

model Post {
  id        Int      @id @default(autoincrement())
  title     String
  content   String?
  published Boolean  @default(false)
  author    User     @relation(fields: [authorId], references: [id])
  authorId  Int
  createdAt DateTime @default(now())
}
// Run migrations:
// npx prisma migrate dev --name init
// npx prisma migrate deploy (production)
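
Once a migration has been applied, the generated client exposes the models as typed query builders. A minimal TypeScript sketch against the schema above (the email and title values are placeholders):

import { PrismaClient } from '@prisma/client'

const prisma = new PrismaClient()

async function main() {
  // Create a user and a related post in one nested write
  const user = await prisma.user.create({
    data: {
      email: 'alice@example.com',
      name: 'Alice',
      posts: { create: { title: 'Hello World' } },
    },
    include: { posts: true },
  })
  console.log(user)
}

main()
  .catch(console.error)
  .finally(() => prisma.$disconnect())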

Query Performance Monitoring

-- PostgreSQL slow query logging
ALTER SYSTEM SET log_min_duration_statement = 1000; -- Log queries slower than 1s
-- Avoid log_statement = 'all' in production: it logs every statement regardless
-- of duration and can flood the log volume; the threshold above is usually enough.
SELECT pg_reload_conf();
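
The slow-query view below relies on the pg_stat_statements extension, which is not enabled by default; loading it requires a server restart:

-- In postgresql.conf (restart required):
--   shared_preload_libraries = 'pg_stat_statements'
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;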
-- Find slow queries
SELECT
    query,
    calls,
    total_exec_time / 1000 AS total_seconds,  -- total_time on PostgreSQL 12 and older
    mean_exec_time / 1000 AS mean_seconds,    -- mean_time on PostgreSQL 12 and older
    rows
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 20;
-- Find missing indexes
SELECT
    schemaname,
    relname,  -- pg_stat_user_tables exposes relname, not tablename
    seq_scan, seq_tup_read,
    idx_scan, idx_tup_fetch
FROM pg_stat_user_tables
WHERE seq_scan > COALESCE(idx_scan, 0)
ORDER BY seq_tup_read DESC;
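
If those scan counters point at a hot table, a targeted index usually fixes it. A hypothetical example, assuming a posts table that is frequently filtered by author_id (both names are placeholders):

-- CONCURRENTLY builds the index without blocking writes
-- (it cannot run inside a transaction block)
CREATE INDEX CONCURRENTLY idx_posts_author_id ON posts (author_id);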
-- Table bloat analysis
SELECT
    relname,
    pg_size_pretty(pg_total_relation_size(relid)) AS size,  -- relid avoids schema-qualification issues
    n_dead_tup AS dead_tuples,
    last_vacuum, last_autovacuum
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC;

Backup Best Practices

- Automate daily backups with cron
- Store backups in multiple locations
- Test restoration regularly (see the restore sketch after this list)
- Encrypt backups at rest
- Implement point-in-time recovery
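
The restoration test can itself be scripted. A minimal sketch, assuming the backup directory from the script above and a throwaway restore_test database (both names are placeholders):

# Restore the newest dump into a scratch database and spot-check it
createdb -U postgres restore_test
gunzip -c "$(ls -t /var/backups/postgres/*.sql.gz | head -n 1)" | psql -U postgres -d restore_test
psql -U postgres -d restore_test -c '\dt'   # verify the tables came back
dropdb -U postgres restore_test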
Performance Optimization
- Add indexes for frequent queries
- Use connection pooling
- Optimize query plans with EXPLAIN
- Run VACUUM and ANALYZE regularly (see the sketch after this list)
- Monitor and tune memory settings
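
For the EXPLAIN and VACUUM items, a short sketch (posts and author_id are placeholder names):

-- Show the actual plan, timings, and buffer usage for a candidate query
EXPLAIN (ANALYZE, BUFFERS)
SELECT * FROM posts WHERE author_id = 42;

-- Reclaim dead tuples and refresh planner statistics for one table
VACUUM (VERBOSE, ANALYZE) posts;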