PostgreSQL Storage

Production-ready persistent storage with PostgreSQL.

Overview

PostgreSQL storage provides durable, ACID-compliant event storage for production deployments. It supports all advanced features including consumer groups, retention policies, event replay, and stream configuration.

Features:

  • Durable persistence - Events survive restarts
  • ACID transactions - Atomic operations
  • Consumer groups - Coordinated consumption
  • Retention policies - Automatic cleanup
  • Event replay - Rebuild projections
  • Stream configuration - Per-stream settings
  • High performance - Optimized queries with SKIP LOCKED

Installation

# Core event streaming
dotnet add package Svrnty.CQRS.Events.PostgreSQL

# Consumer groups (optional)
dotnet add package Svrnty.CQRS.Events.ConsumerGroups

# PostgreSQL driver
dotnet add package Npgsql

Configuration

Basic Setup

appsettings.json:

{
  "ConnectionStrings": {
    "EventStore": "Host=localhost;Database=eventstore;Username=postgres;Password=postgres;Port=5432"
  }
}

Program.cs:

using Svrnty.CQRS.Events.PostgreSQL;

var builder = WebApplication.CreateBuilder(args);

// Register PostgreSQL event streaming
builder.Services.AddPostgresEventStreaming(
    builder.Configuration.GetConnectionString("EventStore"));

var app = builder.Build();
app.Run();
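
Once registered, the store can be resolved from dependency injection and used to append and read events. A minimal sketch, assuming the store is exposed through an IEventStore-style interface (the exact interface name depends on your package version); AppendAsync, ReadStreamAsync, and the Offset property match the usage shown later on this page:

public record OrderPlacedEvent
{
    public int OrderId { get; init; }
}

public class OrderService
{
    private readonly IEventStore _eventStore; // interface name assumed

    public OrderService(IEventStore eventStore) => _eventStore = eventStore;

    public async Task PlaceOrderAsync(int orderId)
    {
        // Append a single event to the "orders" stream
        await _eventStore.AppendAsync("orders", new[]
        {
            new OrderPlacedEvent { OrderId = orderId }
        });
    }

    public async Task ReplayOrdersAsync()
    {
        // Read the stream back from offset 0
        await foreach (var @event in _eventStore.ReadStreamAsync("orders", 0))
        {
            Console.WriteLine($"Offset {@event.Offset}");
        }
    }
}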

Full Configuration

using Svrnty.CQRS.Events.PostgreSQL;
using Svrnty.CQRS.Events.ConsumerGroups;

var builder = WebApplication.CreateBuilder(args);

// Event streaming with PostgreSQL
builder.Services.AddPostgresEventStreaming(
    builder.Configuration.GetConnectionString("EventStore"),
    options =>
    {
        options.SchemaName = "events";  // Custom schema (default: public)
        options.AutoMigrate = true;     // Auto-create tables (default: true)
    });

// Consumer groups
builder.Services.AddPostgresConsumerGroups(
    builder.Configuration.GetSection("EventStreaming:ConsumerGroups"));

// Retention policies
builder.Services.AddPostgresRetentionPolicies(options =>
{
    options.Enabled = true;
    options.CleanupInterval = TimeSpan.FromHours(1);
    options.UseCleanupWindow = true;
    options.CleanupWindowStart = TimeSpan.FromHours(2);   // 2 AM UTC
    options.CleanupWindowEnd = TimeSpan.FromHours(6);     // 6 AM UTC
});

// Event replay
builder.Services.AddPostgresEventReplay();

// Stream configuration
builder.Services.AddPostgresStreamConfiguration();

var app = builder.Build();
app.Run();

appsettings.json:

{
  "ConnectionStrings": {
    "EventStore": "Host=localhost;Database=eventstore;Username=postgres;Password=postgres"
  },
  "EventStreaming": {
    "ConsumerGroups": {
      "HeartbeatInterval": "00:00:10",
      "SessionTimeout": "00:00:30",
      "CleanupInterval": "00:01:00"
    }
  }
}
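
The "00:00:10"-style values bind to TimeSpan properties through the standard .NET options binder. The shape implied by that configuration looks roughly like the following (illustrative only - the real options class ships with Svrnty.CQRS.Events.ConsumerGroups and may differ):

// Illustrative shape only; the actual options type is provided by the package.
public class ConsumerGroupOptions
{
    public TimeSpan HeartbeatInterval { get; set; } = TimeSpan.FromSeconds(10);
    public TimeSpan SessionTimeout { get; set; } = TimeSpan.FromSeconds(30);
    public TimeSpan CleanupInterval { get; set; } = TimeSpan.FromMinutes(1);
}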

Database Setup

Using Docker

# Start PostgreSQL
docker run -d --name postgres-eventstore \
  -e POSTGRES_PASSWORD=postgres \
  -e POSTGRES_DB=eventstore \
  -p 5432:5432 \
  postgres:16

# Verify
docker exec -it postgres-eventstore psql -U postgres -d eventstore -c "\dt"

Using Docker Compose

docker-compose.yml:

version: '3.8'
services:
  postgres:
    image: postgres:16
    environment:
      POSTGRES_DB: eventstore
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
    ports:
      - "5432:5432"
    volumes:
      - postgres-data:/var/lib/postgresql/data

volumes:
  postgres-data:

# Start the stack
docker-compose up -d

Manual Setup

# Create database
createdb -U postgres eventstore

# Connect
psql -U postgres -d eventstore

# Tables created automatically on first run

Auto-Migration

PostgreSQL storage automatically creates required tables on startup:

builder.Services.AddPostgresEventStreaming(
    connectionString,
    options =>
    {
        options.AutoMigrate = true;  // Default: true
    });

// Tables created on application start:
// - events (persistent streams)
// - messages (ephemeral streams)
// - consumer_offsets
// - consumer_registrations
// - retention_policies
// - stream_configurations
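
If you want to confirm the schema exists before serving traffic (for example from a startup health check), a query against information_schema is enough. A sketch using Npgsql directly; the table names are the ones listed above, and a custom SchemaName replaces "public":

using Npgsql;

// Returns the expected tables that are missing from the target schema.
static async Task<IReadOnlyList<string>> FindMissingTablesAsync(
    string connectionString, string schema = "public")
{
    string[] expected =
    {
        "events", "messages", "consumer_offsets",
        "consumer_registrations", "retention_policies", "stream_configurations"
    };

    await using var conn = new NpgsqlConnection(connectionString);
    await conn.OpenAsync();

    await using var cmd = new NpgsqlCommand(
        "SELECT table_name FROM information_schema.tables WHERE table_schema = @schema",
        conn);
    cmd.Parameters.AddWithValue("schema", schema);

    var existing = new HashSet<string>();
    await using var reader = await cmd.ExecuteReaderAsync();
    while (await reader.ReadAsync())
        existing.Add(reader.GetString(0));

    return expected.Where(t => !existing.Contains(t)).ToList();
}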

Disable Auto-Migration

For production, you may want to run migrations manually:

options.AutoMigrate = false;  // Don't auto-create tables

Then run migrations manually:

# Execute migration scripts from package
psql -U postgres -d eventstore -f migrations/001_InitialSchema.sql
psql -U postgres -d eventstore -f migrations/002_ConsumerGroups.sql
psql -U postgres -d eventstore -f migrations/003_RetentionPolicies.sql

Connection String Options

Basic Connection

{
  "ConnectionStrings": {
    "EventStore": "Host=localhost;Database=eventstore;Username=postgres;Password=postgres"
  }
}

With SSL

{
  "ConnectionStrings": {
    "EventStore": "Host=prod.example.com;Database=eventstore;Username=app;Password=secret;SSL Mode=Require"
  }
}

With Connection Pooling

{
  "ConnectionStrings": {
    "EventStore": "Host=localhost;Database=eventstore;Username=postgres;Password=postgres;Minimum Pool Size=10;Maximum Pool Size=100;Connection Idle Lifetime=300"
  }
}

Azure PostgreSQL

{
  "ConnectionStrings": {
    "EventStore": "Host=myserver.postgres.database.azure.com;Database=eventstore;Username=myuser@myserver;Password=mypassword;SSL Mode=Require"
  }
}

AWS RDS PostgreSQL

{
  "ConnectionStrings": {
    "EventStore": "Host=myinstance.abc123.us-east-1.rds.amazonaws.com;Database=eventstore;Username=postgres;Password=mypassword;SSL Mode=Require"
  }
}

Production Configuration

High-Performance Settings

builder.Services.AddPostgresEventStreaming(
    "Host=localhost;Database=eventstore;Username=postgres;Password=postgres;" +
    "Minimum Pool Size=20;" +           // Maintain 20 connections
    "Maximum Pool Size=200;" +          // Allow up to 200 connections
    "Connection Idle Lifetime=300;" +   // Recycle idle connections after 5 min
    "Connection Pruning Interval=10;" + // Check for idle connections every 10 sec
    "Command Timeout=30");              // 30-second command timeout

Multi-Instance Deployment

PostgreSQL storage supports multiple application instances:

# Instance 1
docker run -d myapp --WorkerId=1

# Instance 2
docker run -d myapp --WorkerId=2

# Instance 3
docker run -d myapp --WorkerId=3

# All instances share same PostgreSQL database
# Consumer groups coordinate automatically

Performance

Batch Operations

Append events in batches for better throughput:

// ✅ Good - Batch append
var events = Enumerable.Range(1, 1000)
    .Select(i => new OrderPlacedEvent { OrderId = i })
    .ToArray();

await _eventStore.AppendAsync("orders", events);

// ❌ Bad - Individual appends
for (int i = 1; i <= 1000; i++)
{
    await _eventStore.AppendAsync("orders", new[]
    {
        new OrderPlacedEvent { OrderId = i }
    });
}
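
When events arrive one at a time (for example from an incoming API or queue), buffer them and flush in chunks rather than appending individually. A minimal sketch; OrderPlacedEvent and AppendAsync mirror the example above, and incomingOrderIds stands in for whatever source you have:

// Buffer incoming events and flush once per batch.
var buffer = new List<OrderPlacedEvent>(capacity: 500);

foreach (var orderId in incomingOrderIds) // assumed source of order IDs
{
    buffer.Add(new OrderPlacedEvent { OrderId = orderId });

    if (buffer.Count >= 500)
    {
        await _eventStore.AppendAsync("orders", buffer.ToArray());
        buffer.Clear();
    }
}

// Flush the remainder
if (buffer.Count > 0)
    await _eventStore.AppendAsync("orders", buffer.ToArray());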

Read Performance

Use pagination for large streams:

const int batchSize = 1000;
long currentOffset = 0;

while (true)
{
    var batch = new List<StoredEvent>();

    await foreach (var @event in _eventStore.ReadStreamAsync("orders", currentOffset))
    {
        batch.Add(@event);

        if (batch.Count >= batchSize)
            break;
    }

    if (batch.Count == 0)
        break;

    await ProcessBatchAsync(batch);
    currentOffset = batch.Max(e => e.Offset) + 1;
}

Dequeue Performance

PostgreSQL uses SKIP LOCKED for efficient concurrent dequeue:

-- Efficient concurrent dequeue
SELECT * FROM messages
WHERE stream_name = 'email-queue'
  AND visibility_timeout < NOW()
ORDER BY "offset"
LIMIT 1
FOR UPDATE SKIP LOCKED;

Multiple workers can dequeue concurrently without blocking.
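
The same pattern works for ad-hoc workers that bypass the library and talk to the messages table directly. A sketch with Npgsql, assuming the column layout shown in the query above and a bigint offset column (the package's own dequeue API handles all of this for you):

using Npgsql;

// Claims one visible message from the queue, or returns null if none is available.
// Runs inside a transaction so the row lock is held until processing commits.
static async Task<long?> TryClaimMessageAsync(NpgsqlConnection conn, string stream)
{
    await using var tx = await conn.BeginTransactionAsync();

    await using var cmd = new NpgsqlCommand(
        """
        SELECT "offset" FROM messages
        WHERE stream_name = @stream
          AND visibility_timeout < NOW()
        ORDER BY "offset"
        LIMIT 1
        FOR UPDATE SKIP LOCKED
        """,
        conn, tx);
    cmd.Parameters.AddWithValue("stream", stream);

    var result = await cmd.ExecuteScalarAsync();
    if (result is null)
    {
        await tx.RollbackAsync();
        return null;
    }

    // ... process the message and update/delete the row here ...
    await tx.CommitAsync();
    return (long)result;
}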

Monitoring

Stream Statistics

-- Count events per stream
SELECT stream_name, COUNT(*) as event_count
FROM events
GROUP BY stream_name
ORDER BY event_count DESC;

-- Event counts alongside the total size of the events table
-- (PostgreSQL does not track per-stream sizes directly)
SELECT
  stream_name,
  COUNT(*) as event_count,
  pg_size_pretty(pg_total_relation_size('events')) as events_table_size
FROM events
GROUP BY stream_name;

-- Recent activity
SELECT stream_name, MAX(timestamp) as last_event
FROM events
GROUP BY stream_name
ORDER BY last_event DESC;

Consumer Lag

-- Consumer lag per group ("offset" is quoted because OFFSET is a reserved word)
SELECT
  co.stream_name,
  co.group_id,
  co.consumer_id,
  co."offset" as consumer_offset,
  (SELECT MAX("offset") FROM events WHERE stream_name = co.stream_name) as stream_head,
  (SELECT MAX("offset") FROM events WHERE stream_name = co.stream_name) - co."offset" as lag
FROM consumer_offsets co
ORDER BY lag DESC;
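
The lag query can also be run periodically from the application and pushed to your metrics system. A minimal sketch with Npgsql; table and column names match the query above, group_id is assumed to be text, and lag is assumed to be a bigint:

using Npgsql;

// Logs the lag (stream head minus committed offset) for every consumer group.
static async Task LogConsumerLagAsync(string connectionString)
{
    const string sql = """
        SELECT co.stream_name, co.group_id,
               (SELECT MAX("offset") FROM events e
                 WHERE e.stream_name = co.stream_name) - co."offset" AS lag
        FROM consumer_offsets co
        """;

    await using var conn = new NpgsqlConnection(connectionString);
    await conn.OpenAsync();

    await using var cmd = new NpgsqlCommand(sql, conn);
    await using var reader = await cmd.ExecuteReaderAsync();
    while (await reader.ReadAsync())
    {
        var lag = reader.IsDBNull(2) ? 0L : reader.GetInt64(2);
        Console.WriteLine($"{reader.GetString(0)}/{reader.GetString(1)} lag={lag}");
    }
}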

Database Size

-- Database size
SELECT pg_size_pretty(pg_database_size('eventstore'));

-- Table sizes
SELECT
  schemaname,
  tablename,
  pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS size
FROM pg_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;

Backup and Recovery

pg_dump Backup

# Full backup
pg_dump -U postgres -d eventstore -F c -f eventstore_backup.dump

# Restore
pg_restore -U postgres -d eventstore_new eventstore_backup.dump

Continuous Archiving (WAL)

# Enable WAL archiving in postgresql.conf
wal_level = replica
archive_mode = on
archive_command = 'cp %p /var/lib/postgresql/wal_archive/%f'

# Base backup
pg_basebackup -U postgres -D /var/lib/postgresql/backup -F tar -z -P

# Point-in-time recovery
# Restore base backup, then replay WAL files

Streaming Replication

Use PostgreSQL streaming replication for high availability: all writes (event appends, offset commits, configuration changes) go to the primary, while read-only work such as monitoring, reporting, and ad-hoc queries can be served from standby replicas.
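
One way to take advantage of a standby, sketched with plain Npgsql (the host names and the idea of injecting a separate read-only data source are assumptions, not a built-in feature of the package; NpgsqlDataSource requires Npgsql 7 or later):

using Npgsql;

// Event streaming writes go to the primary.
builder.Services.AddPostgresEventStreaming(
    "Host=primary.internal;Database=eventstore;Username=app;Password=secret");

// Monitoring and reporting queries can use a standby replica.
var standby = NpgsqlDataSource.Create(
    "Host=standby.internal;Database=eventstore;Username=app;Password=secret");
builder.Services.AddSingleton(standby); // inject NpgsqlDataSource where read-only access is needed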

Best Practices

DO

  • Use connection pooling
  • Batch operations when possible
  • Monitor database size and performance
  • Set up regular backups
  • Use appropriate indexes
  • Configure retention policies
  • Monitor consumer lag

DON'T

  • Don't store large binary data in events
  • Don't delete events manually (use retention policies)
  • Don't skip backups
  • Don't ignore slow query warnings
  • Don't run without indexes
  • Don't open a new connection per operation - rely on the connection pool

See Also