
Docker Deployment

Complete guide for deploying RamAPI applications with Docker and Docker Compose.

Table of Contents

  1. Dockerfile
  2. Multi-Stage Builds
  3. Docker Compose
  4. Container Best Practices
  5. Production Examples

Dockerfile

Basic Dockerfile

# Dockerfile
FROM node:20-alpine
 
# Set working directory
WORKDIR /app
 
# Copy package files
COPY package*.json ./
 
# Install all dependencies (dev dependencies are needed for the TypeScript build)
RUN npm ci

# Copy application code
COPY . .

# Build TypeScript
RUN npm run build

# Remove dev dependencies now that the build is done
RUN npm prune --production
 
# Expose port
EXPOSE 3000
 
# Set environment
ENV NODE_ENV=production
 
# Start application
CMD ["node", "dist/index.js"]

With uWebSockets.js

# Dockerfile with uWebSockets support
FROM node:20-alpine
 
# Install build dependencies for uWebSockets
RUN apk add --no-cache \
    python3 \
    make \
    g++
 
WORKDIR /app
 
COPY package*.json ./
 
# Install all dependencies (including uWebSockets)
RUN npm ci
 
COPY . .
 
RUN npm run build
 
# Remove dev dependencies
RUN npm prune --production
 
EXPOSE 3000
 
ENV NODE_ENV=production
 
CMD ["node", "dist/index.js"]

Multi-Stage Builds

Optimized Multi-Stage Build

# Stage 1: Build
FROM node:20-alpine AS builder
 
# Install build tools
RUN apk add --no-cache python3 make g++
 
WORKDIR /app
 
# Copy package files
COPY package*.json ./
COPY tsconfig.json ./
 
# Install all dependencies
RUN npm ci
 
# Copy source code
COPY src ./src
 
# Build TypeScript
RUN npm run build
 
# Stage 2: Production
FROM node:20-alpine
 
# Install runtime dependencies for uWebSockets
RUN apk add --no-cache libstdc++
 
WORKDIR /app
 
# Copy package files
COPY package*.json ./
 
# Install production dependencies only
RUN npm ci --only=production
 
# Copy built application from builder
COPY --from=builder /app/dist ./dist
 
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001
 
# Change ownership
RUN chown -R nodejs:nodejs /app
 
# Switch to non-root user
USER nodejs
 
EXPOSE 3000
 
ENV NODE_ENV=production
 
CMD ["node", "dist/index.js"]

Benefits:

  • Smaller final image (~150MB vs ~800MB; see the size check after this list)
  • No build tools in production image
  • More secure (runs as non-root user)
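
You can check the size difference yourself by building both variants under different tags and comparing (file and tag names are illustrative):

# Build the single-stage and multi-stage images
docker build -f Dockerfile -t ramapi-app:single .
docker build -f Dockerfile.multistage -t ramapi-app:multi .

# Compare the resulting image sizes
docker images ramapi-app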

Docker Compose

Basic Setup

# docker-compose.yml
version: '3.8'
 
services:
  api:
    build: .
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - PORT=3000
      - DATABASE_URL=postgresql://postgres:password@db:5432/myapp
      - REDIS_URL=redis://redis:6379
      - JWT_SECRET=${JWT_SECRET}
    depends_on:
      - db
      - redis
    restart: unless-stopped
 
  db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=myapp
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=password
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
 
  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    restart: unless-stopped
 
volumes:
  postgres_data:
  redis_data:
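
Start the stack and follow the API logs:

# Start all services in the background
docker-compose up -d --build

# Tail the API logs
docker-compose logs -f api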

With Nginx Reverse Proxy

# docker-compose.yml
version: '3.8'
 
services:
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
    depends_on:
      - api
    restart: unless-stopped
 
  api:
    build: .
    expose:
      - "3000"
    environment:
      - NODE_ENV=production
      - PORT=3000
      - DATABASE_URL=postgresql://postgres:password@db:5432/myapp
    env_file:
      - .env.production
    depends_on:
      - db
      - redis
    restart: unless-stopped
 
  db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=myapp
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
 
  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    restart: unless-stopped
 
volumes:
  postgres_data:
  redis_data:

nginx.conf:

events {
    worker_connections 1024;
}
 
http {
    upstream api {
        server api:3000;
    }
 
    server {
        listen 80;
        server_name yourdomain.com;
 
        # Redirect to HTTPS
        return 301 https://$server_name$request_uri;
    }
 
    server {
        listen 443 ssl http2;
        server_name yourdomain.com;
 
        ssl_certificate /etc/nginx/ssl/cert.pem;
        ssl_certificate_key /etc/nginx/ssl/key.pem;
 
        # Security headers
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
 
        # Proxy to API
        location / {
            proxy_pass http://api;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_cache_bypass $http_upgrade;
        }
    }
}
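
After editing nginx.conf you can validate and reload the configuration inside the running container without downtime:

# Validate the mounted configuration
docker-compose exec nginx nginx -t

# Reload nginx gracefully
docker-compose exec nginx nginx -s reload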

With Observability Stack

# docker-compose.yml
version: '3.8'
 
services:
  api:
    build: .
    ports:
      - "3000:3000"
    environment:
      - TRACING_ENABLED=true
      - TRACING_ENDPOINT=http://jaeger:4318
    depends_on:
      - jaeger
      - prometheus
    restart: unless-stopped
 
  # Jaeger - Distributed Tracing
  jaeger:
    image: jaegertracing/all-in-one:latest
    ports:
      - "16686:16686"  # UI
      - "4318:4318"    # OTLP HTTP
    environment:
      - COLLECTOR_OTLP_ENABLED=true
    restart: unless-stopped
 
  # Prometheus - Metrics
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
    restart: unless-stopped
 
  # Grafana - Dashboards
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3001:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana_data:/var/lib/grafana
    depends_on:
      - prometheus
      - jaeger
    restart: unless-stopped
 
volumes:
  prometheus_data:
  grafana_data:
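
The compose file above mounts a ./prometheus.yml that is not shown here. A minimal sketch, assuming the API exposes Prometheus metrics at /metrics on port 3000 (adjust the job name and path to your setup):

# Write a minimal Prometheus scrape config next to docker-compose.yml
cat > prometheus.yml <<'EOF'
global:
  scrape_interval: 15s

scrape_configs:
  # Scrape the API container via its compose service name
  - job_name: 'ramapi'
    metrics_path: /metrics
    static_configs:
      - targets: ['api:3000']
EOF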

Container Best Practices

1. Use .dockerignore

node_modules
npm-debug.log
.git
.gitignore
.env
.env.*
dist
coverage
.vscode
.idea
*.md
Dockerfile
docker-compose.yml

2. Health Checks

# Add health check to Dockerfile
# (make sure healthcheck.js is copied into the image, e.g. COPY healthcheck.js ./)
HEALTHCHECK --interval=30s --timeout=3s --start-period=40s --retries=3 \
  CMD node healthcheck.js

healthcheck.js:

const http = require('http');
 
const options = {
  host: 'localhost',
  port: 3000,
  path: '/health',
  timeout: 2000,
};
 
const request = http.request(options, (res) => {
  if (res.statusCode === 200) {
    process.exit(0);
  } else {
    process.exit(1);
  }
});
 
request.on('error', () => {
  process.exit(1);
});

// Fail fast if the request exceeds the configured timeout
request.on('timeout', () => {
  request.destroy();
  process.exit(1);
});

request.end();
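
Once a container built with this health check is running, Docker shows the status in docker ps, and you can also query it directly (container name is illustrative):

# Prints "healthy", "unhealthy", or "starting"
docker inspect --format='{{.State.Health.Status}}' ramapi-app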

3. Environment-Specific Builds

# docker-compose.prod.yml
version: '3.8'
 
services:
  api:
    build:
      context: .
      dockerfile: Dockerfile.prod
    environment:
      - NODE_ENV=production
    deploy:
      replicas: 3
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 256M
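
A file like this is typically layered over the base compose file; values in later files override earlier ones:

# Merge the base file with the production overrides
docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d --build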

4. Secrets Management

# docker-compose.yml with secrets
version: '3.8'
 
services:
  api:
    build: .
    secrets:
      - jwt_secret
      - db_password
    environment:
      - JWT_SECRET_FILE=/run/secrets/jwt_secret
      - DB_PASSWORD_FILE=/run/secrets/db_password
 
secrets:
  jwt_secret:
    file: ./secrets/jwt_secret.txt
  db_password:
    file: ./secrets/db_password.txt

Load secrets in app:

import fs from 'fs';
 
function getSecret(name: string): string {
  const file = process.env[`${name}_FILE`];
  if (file) {
    return fs.readFileSync(file, 'utf8').trim();
  }
  return process.env[name] || '';
}
 
export const config = {
  jwtSecret: getSecret('JWT_SECRET'),
  dbPassword: getSecret('DB_PASSWORD'),
};
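
The secret files referenced by the compose configuration must exist before the stack starts. One way to create them (the random generation below is just an example; use whatever secret management you already have):

# Create the secrets directory and files referenced in docker-compose.yml
mkdir -p secrets
openssl rand -hex 32 > secrets/jwt_secret.txt
echo 'change-me' > secrets/db_password.txt
chmod 600 secrets/jwt_secret.txt secrets/db_password.txt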

Production Examples

Complete Production Setup

Dockerfile.prod:

# Stage 1: Dependencies
FROM node:20-alpine AS deps
RUN apk add --no-cache python3 make g++
WORKDIR /app
COPY package*.json ./
RUN npm ci
 
# Stage 2: Build
FROM node:20-alpine AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN npm run build
RUN npm prune --production
 
# Stage 3: Production
FROM node:20-alpine
RUN apk add --no-cache libstdc++ dumb-init
 
WORKDIR /app
 
# Copy production dependencies
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/package.json ./
 
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001 && \
    chown -R nodejs:nodejs /app
 
USER nodejs
 
EXPOSE 3000
 
ENV NODE_ENV=production
 
# Use dumb-init to handle signals properly
ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "dist/index.js"]

docker-compose.prod.yml:

version: '3.8'
 
services:
  api:
    build:
      context: .
      dockerfile: Dockerfile.prod
    image: myapp-api:${VERSION:-latest}
    # nginx publishes the public ports; replicas cannot share a fixed host port
    expose:
      - "3000"
    environment:
      - NODE_ENV=production
      - PORT=3000
      - DATABASE_URL=postgresql://postgres:${DB_PASSWORD}@db:5432/myapp
      - REDIS_URL=redis://redis:6379
    env_file:
      - .env.production
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
    restart: unless-stopped
    deploy:
      replicas: 3
      resources:
        limits:
          cpus: '1'
          memory: 1G
        reservations:
          cpus: '0.5'
          memory: 512M
 
  db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=myapp
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped
 
  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    command: redis-server --appendonly yes
    restart: unless-stopped
 
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
    depends_on:
      - api
    restart: unless-stopped
 
volumes:
  postgres_data:
    driver: local
  redis_data:
    driver: local
 
networks:
  default:
    driver: bridge

Deployment Commands

# Build and start
docker-compose -f docker-compose.prod.yml up -d --build
 
# Scale API instances
docker-compose -f docker-compose.prod.yml up -d --scale api=5
 
# View logs
docker-compose -f docker-compose.prod.yml logs -f api
 
# Stop all services
docker-compose -f docker-compose.prod.yml down
 
# Stop and remove volumes
docker-compose -f docker-compose.prod.yml down -v
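
To check that everything came up as expected:

# Show container status and health
docker-compose -f docker-compose.prod.yml ps

# One-off snapshot of CPU and memory usage
docker stats --no-stream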

CI/CD Integration

# .github/workflows/docker.yml
name: Docker Build and Push
 
on:
  push:
    branches: [main]
    tags: ['v*']
 
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
 
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
 
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}
 
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: myorg/myapp
 
      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ./Dockerfile.prod
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
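
After CI pushes a new image, a typical manual rollout on the server looks like this (assuming the image: field in docker-compose.prod.yml matches the tag that CI pushes):

# Pull the freshly pushed image and recreate only the API service
docker-compose -f docker-compose.prod.yml pull api
docker-compose -f docker-compose.prod.yml up -d api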

See Also