Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .codex/settings/kiroCodex-settings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"paths": {
"specs": ".codex/specs",
"steering": ".codex/steering"
}
}
1 change: 1 addition & 0 deletions migrations/20250215000000_enable_pgcrypto.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
-- Revert 20250215000000_enable_pgcrypto: remove the pgcrypto extension
-- (source of gen_random_uuid(), used by later migrations' test data).
DROP EXTENSION IF EXISTS "pgcrypto";
3 changes: 3 additions & 0 deletions migrations/20250216000000_init.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- Revert 20250216000000_init: drop the transactions table and its indexes.
-- Dropping the table alone would also remove its indexes; the explicit
-- DROP INDEX statements just make the rollback order visible.
DROP INDEX IF EXISTS idx_transactions_stellar_account;
DROP INDEX IF EXISTS idx_transactions_status;
DROP TABLE IF EXISTS transactions;
25 changes: 25 additions & 0 deletions migrations/20250217000000_partition_transactions.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
-- Reverse the partitioned table migration: restore the plain transactions table

-- Drop the partition-maintenance helpers first so nothing can create new
-- partitions while the tables below are being removed.
DROP FUNCTION IF EXISTS maintain_partitions();
DROP FUNCTION IF EXISTS detach_old_partitions(INTEGER);
DROP FUNCTION IF EXISTS create_monthly_partition();

-- Drop all partition children (dynamically, to handle any auto-created ones).
-- pg_inherits links each child (inhrelid) to its parent (inhparent), so this
-- finds every partition of `transactions` regardless of its naming scheme.
DO $$
DECLARE
    r RECORD;
BEGIN
    FOR r IN
        SELECT c.relname
        FROM pg_class c
        JOIN pg_inherits i ON c.oid = i.inhrelid
        JOIN pg_class p ON i.inhparent = p.oid
        WHERE p.relname = 'transactions'
    LOOP
        -- %I quotes the relation name safely for dynamic SQL.
        EXECUTE format('DROP TABLE IF EXISTS %I', r.relname);
    END LOOP;
END $$;

-- Drop the (now childless) partitioned parent table.
DROP TABLE IF EXISTS transactions;

-- Restore the original non-partitioned table if it still exists.
-- NOTE(review): assumes the up-migration renamed the original table to
-- transactions_old — confirm against 20250217000000_partition_transactions.sql.
ALTER TABLE IF EXISTS transactions_old RENAME TO transactions;
2 changes: 2 additions & 0 deletions migrations/20250218000000_feature_flags.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Revert 20250218000000_feature_flags: drop the feature_flags table and index.
DROP INDEX IF EXISTS idx_feature_flags_enabled;
DROP TABLE IF EXISTS feature_flags;
2 changes: 2 additions & 0 deletions migrations/20260219000000_add_assets_table.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Revert 20260219000000_add_assets_table: drop the assets table and its
-- code/issuer lookup index.
DROP INDEX IF EXISTS idx_assets_code_issuer;
DROP TABLE IF EXISTS assets;
3 changes: 3 additions & 0 deletions migrations/20260220000000_settlements.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- Revert 20260220000000_settlements. Order matters: remove the
-- transactions.settlement_id column (presumably a foreign key into
-- settlements — confirm against the up-migration) before dropping the
-- settlements table itself.
DROP INDEX IF EXISTS idx_transactions_settlement_id;
ALTER TABLE transactions DROP COLUMN IF EXISTS settlement_id;
DROP TABLE IF EXISTS settlements;
7 changes: 7 additions & 0 deletions migrations/20260220000001_audit_logs.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
-- Revert 20260220000001_audit_logs: drop all audit-log indexes, then the table.
DROP INDEX IF EXISTS idx_audit_logs_entity_timestamp;
DROP INDEX IF EXISTS idx_audit_logs_action;
DROP INDEX IF EXISTS idx_audit_logs_actor;
DROP INDEX IF EXISTS idx_audit_logs_timestamp;
DROP INDEX IF EXISTS idx_audit_logs_entity_type;
DROP INDEX IF EXISTS idx_audit_logs_entity_id;
DROP TABLE IF EXISTS audit_logs;
4 changes: 4 additions & 0 deletions migrations/20260220143500_transaction_dlq.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
-- Revert 20260220143500_transaction_dlq: drop the dead-letter-queue table
-- and its indexes.
DROP INDEX IF EXISTS idx_transaction_dlq_stellar_account;
DROP INDEX IF EXISTS idx_transaction_dlq_moved_at;
DROP INDEX IF EXISTS idx_transaction_dlq_transaction_id;
DROP TABLE IF EXISTS transaction_dlq;
5 changes: 5 additions & 0 deletions migrations/20260222000000_transaction_search_indexes.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
-- Revert 20260222000000_transaction_search_indexes: the up-migration only
-- added indexes, so the rollback drops exactly those (no table changes).
DROP INDEX IF EXISTS idx_transactions_search;
DROP INDEX IF EXISTS idx_transactions_created_id;
DROP INDEX IF EXISTS idx_transactions_amount;
DROP INDEX IF EXISTS idx_transactions_created_status;
DROP INDEX IF EXISTS idx_transactions_asset_code;
4 changes: 4 additions & 0 deletions migrations/20260222000001_transaction_memo_metadata.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
-- Revert 20260222000001_transaction_memo_metadata: remove the memo/metadata
-- columns added to transactions (data in those columns is discarded).
ALTER TABLE transactions
DROP COLUMN IF EXISTS metadata,
DROP COLUMN IF EXISTS memo_type,
DROP COLUMN IF EXISTS memo;
218 changes: 218 additions & 0 deletions tests/migration_tests.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,218 @@
//! Migration safety tests (issue #25)
//!
//! For every `<timestamp>_<name>.sql` up-migration there must be a matching
//! `<timestamp>_<name>.down.sql` file. The round-trip test spins up a
//! throwaway Postgres container, runs all up-migrations, inserts a small
//! amount of dummy data, then applies every down-migration in reverse order,
//! and finally re-runs all up-migrations to confirm the schema is intact.

use sqlx::{migrate::Migrator, PgPool};
use std::{
fs,
path::{Path, PathBuf},
};
use testcontainers::runners::AsyncRunner;
use testcontainers_modules::postgres::Postgres;

// ── helpers ───────────────────────────────────────────────────────────────────

fn migrations_dir() -> PathBuf {
Path::new(env!("CARGO_MANIFEST_DIR")).join("migrations")
}

/// Collect all up-migration stems (filename without the `.sql` extension),
/// sorted lexicographically — the order in which sqlx applies them.
///
/// Down-migrations (`*.down.sql`) are excluded; unreadable directory
/// entries are skipped.
fn up_migration_stems() -> Vec<String> {
    let dir = migrations_dir();
    let mut stems: Vec<String> = fs::read_dir(&dir)
        .expect("cannot read migrations dir")
        .filter_map(|e| e.ok())
        .map(|e| e.file_name().to_string_lossy().to_string())
        .filter(|name| !name.ends_with(".down.sql"))
        // strip_suffix removes exactly one trailing ".sql" (trim_end_matches
        // would strip repeats, mangling a name like `foo.sql.sql`) and doubles
        // as the extension filter: non-.sql names yield None and are dropped.
        .filter_map(|name| name.strip_suffix(".sql").map(str::to_owned))
        .collect();
    stems.sort();
    stems
}

// ── convention enforcement ────────────────────────────────────────────────────

/// Every up-migration must have a corresponding `.down.sql` file.
#[test]
fn every_up_migration_has_a_down_migration() {
    let dir = migrations_dir();
    let stems = up_migration_stems();
    assert!(
        !stems.is_empty(),
        "No migration files found in {:?}",
        dir
    );

    // Collect every stem whose `.down.sql` counterpart is absent.
    let missing: Vec<String> = stems
        .iter()
        .filter(|stem| !dir.join(format!("{stem}.down.sql")).exists())
        .map(|stem| format!("{stem}.down.sql"))
        .collect();

    assert!(
        missing.is_empty(),
        "Missing down-migration files:\n{}",
        missing.join("\n")
    );
}

/// Down-migration files must not exist without a corresponding up-migration.
#[test]
fn no_orphan_down_migrations() {
    let dir = migrations_dir();
    let up_stems: std::collections::HashSet<String> =
        up_migration_stems().into_iter().collect();

    let orphans: Vec<String> = fs::read_dir(&dir)
        .expect("cannot read migrations dir")
        .filter_map(|e| e.ok())
        .map(|e| e.file_name().to_string_lossy().to_string())
        // strip_suffix removes exactly one trailing ".down.sql"
        // (trim_end_matches would strip repeated suffixes, mis-deriving the
        // stem for a name like `x.down.sql.down.sql`) and filters out
        // non-down files in the same step.
        .filter_map(|name| name.strip_suffix(".down.sql").map(str::to_owned))
        .filter(|stem| !up_stems.contains(stem))
        .collect();

    assert!(
        orphans.is_empty(),
        "Orphan down-migration files (no matching up-migration):\n{}",
        orphans.join("\n")
    );
}

/// Down-migration files must contain at least some non-whitespace content.
#[test]
fn down_migrations_are_non_empty() {
    let dir = migrations_dir();

    // A file counts as empty when it exists but holds only whitespace;
    // an unreadable file is treated the same way (matches the original's
    // `unwrap_or_default` behavior).
    let empty: Vec<String> = up_migration_stems()
        .into_iter()
        .map(|stem| dir.join(format!("{stem}.down.sql")))
        .filter(|path| path.exists())
        .filter(|path| {
            fs::read_to_string(path)
                .map(|content| content.trim().is_empty())
                .unwrap_or(true)
        })
        .map(|path| path.display().to_string())
        .collect();

    assert!(
        empty.is_empty(),
        "Empty down-migration files:\n{}",
        empty.join("\n")
    );
}

// ── round-trip test ───────────────────────────────────────────────────────────

/// Spin up a real Postgres container and verify:
/// 1. All up-migrations apply cleanly.
/// 2. Dummy data can be inserted.
/// 3. All down-migrations apply cleanly (in reverse order).
/// 4. All up-migrations can be re-applied (schema integrity).
#[tokio::test]
async fn migration_round_trip() {
    // `container` must stay alive for the whole test: dropping it stops the
    // Postgres container.
    let container = Postgres::default().start().await.unwrap();
    let port = container.get_host_port_ipv4(5432).await.unwrap();
    let db_url = format!("postgres://postgres:postgres@127.0.0.1:{port}/postgres");

    let pool = PgPool::connect(&db_url).await.unwrap();

    // ── Step 1: run all up-migrations ─────────────────────────────────────────
    let migrator = Migrator::new(migrations_dir().as_path()).await.unwrap();
    migrator.run(&pool).await.expect("up-migrations failed");

    // ── Step 2: insert dummy data so rollbacks run against a non-empty DB ─────
    insert_dummy_data(&pool).await;

    // ── Step 3: apply down-migrations in reverse (newest-first) order ─────────
    let stems = up_migration_stems();
    let dir = migrations_dir();

    for stem in stems.iter().rev() {
        let down_sql = fs::read_to_string(dir.join(format!("{stem}.down.sql")))
            .unwrap_or_else(|_| panic!("cannot read {stem}.down.sql"));

        sqlx::raw_sql(&down_sql)
            .execute(&pool)
            .await
            .unwrap_or_else(|e| panic!("down-migration {stem} failed: {e}"));
    }

    // ── Step 4: re-run all up-migrations ──────────────────────────────────────
    // BUG FIX: none of the down-migrations drop sqlx's `_sqlx_migrations`
    // bookkeeping table, so a plain re-run would consider every migration
    // already applied and silently verify nothing. Drop the bookkeeping
    // table so the re-run genuinely re-creates the schema from scratch.
    sqlx::raw_sql("DROP TABLE IF EXISTS _sqlx_migrations")
        .execute(&pool)
        .await
        .expect("failed to reset sqlx migration bookkeeping table");

    // Reconnect on a fresh pool so no cached statements reference dropped
    // objects.
    drop(pool);
    let pool2 = PgPool::connect(&db_url).await.unwrap();
    let migrator2 = Migrator::new(migrations_dir().as_path()).await.unwrap();
    migrator2
        .run(&pool2)
        .await
        .expect("re-run of up-migrations after rollback failed");
}

/// Insert a small amount of dummy data so the down-migrations are tested
/// against a non-empty database.
///
/// Seeds three tables created by the up-migrations — `transactions`
/// (month-partitioned), `feature_flags`, and `audit_logs` — and panics on
/// any SQL error, which fails the calling test.
async fn insert_dummy_data(pool: &PgPool) {
    // Ensure a current-month partition exists for the partitioned transactions table.
    // The partition is named `transactions_yYYYYmMM` and covers
    // [first day of this month, first day of next month). %I/%L quote the
    // identifier and literals safely for dynamic SQL.
    sqlx::query(
        r#"
        DO $$
        DECLARE
            pname TEXT;
            s TEXT;
            e TEXT;
        BEGIN
            pname := 'transactions_y' || TO_CHAR(NOW(), 'YYYY') || 'm' || TO_CHAR(NOW(), 'MM');
            s := TO_CHAR(DATE_TRUNC('month', NOW()), 'YYYY-MM-DD');
            e := TO_CHAR(DATE_TRUNC('month', NOW()) + INTERVAL '1 month', 'YYYY-MM-DD');
            IF NOT EXISTS (SELECT 1 FROM pg_class WHERE relname = pname) THEN
                EXECUTE format(
                    'CREATE TABLE %I PARTITION OF transactions FOR VALUES FROM (%L) TO (%L)',
                    pname, s, e
                );
            END IF;
        END $$;
        "#,
    )
    .execute(pool)
    .await
    .expect("failed to create test partition");

    // Insert a transaction. The account value is a 56-character, G-prefixed
    // string — presumably shaped like a Stellar public key; confirm the
    // column's expected format against the init migration.
    sqlx::query(
        r#"
        INSERT INTO transactions (stellar_account, amount, asset_code, status)
        VALUES ('GABC1234567890123456789012345678901234567890123456789012', 100.0, 'USD', 'pending')
        "#,
    )
    .execute(pool)
    .await
    .expect("failed to insert dummy transaction");

    // Insert a feature flag. ON CONFLICT DO NOTHING keeps this idempotent in
    // case a seed migration already created the same flag name.
    sqlx::query(
        "INSERT INTO feature_flags (name, enabled) VALUES ('test_flag', false) ON CONFLICT DO NOTHING",
    )
    .execute(pool)
    .await
    .expect("failed to insert dummy feature flag");

    // Insert an audit log entry. gen_random_uuid() comes from the pgcrypto
    // extension enabled by the 20250215000000 migration.
    sqlx::query(
        r#"
        INSERT INTO audit_logs (entity_id, entity_type, action, actor)
        VALUES (gen_random_uuid(), 'transaction', 'created', 'test')
        "#,
    )
    .execute(pool)
    .await
    .expect("failed to insert dummy audit log");
}