Nginx (pronounced "engine-x") is the most widely deployed web server and reverse proxy on the internet. It powers roughly a third of all websites and is the de facto standard for reverse proxying traffic to backend applications and containers. Whether you are serving static files, load balancing across multiple backends, terminating TLS, or proxying WebSocket connections, Nginx handles it all with minimal resource consumption.

This guide takes you from installation through production-grade configuration, covering every major feature you will use in a self-hosted or professional environment.

Installation

# Debian/Ubuntu (mainline repository for latest features)
sudo apt install -y curl gnupg2 ca-certificates lsb-release

# Import the nginx signing key into a dedicated keyring.
# ('apt-key add' is deprecated; APT now expects per-repo keyrings.)
curl -fsSL https://nginx.org/keys/nginx_signing.key \
  | gpg --dearmor \
  | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null

# APT source lists belong in /etc/apt/sources.list.d/ (not /etc/nginx/)
echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
http://nginx.org/packages/mainline/debian $(lsb_release -cs) nginx" | \
  sudo tee /etc/apt/sources.list.d/nginx.list
sudo apt update && sudo apt install -y nginx

# Or use the distro package (usually older but stable)
sudo apt install -y nginx

# Start and enable
sudo systemctl start nginx
sudo systemctl enable nginx

# Verify
nginx -v
curl -I http://localhost

Configuration File Structure

# Main configuration file
/etc/nginx/nginx.conf

# Per-site configurations
/etc/nginx/sites-available/   # All site configs
/etc/nginx/sites-enabled/     # Symlinks to active sites
/etc/nginx/conf.d/            # Additional configs (auto-loaded)

# Common workflow:
# 1. Create config in sites-available/
# 2. Symlink to sites-enabled/
# 3. Test and reload

sudo ln -s /etc/nginx/sites-available/mysite.conf /etc/nginx/sites-enabled/
sudo nginx -t          # Test configuration syntax
sudo nginx -s reload   # Reload without downtime

Server Blocks (Virtual Hosts)

Server blocks are how Nginx serves multiple websites or applications from a single server. Each block defines which domain names and ports it responds to:

# /etc/nginx/sites-available/static-site.conf
# Minimal virtual host serving static files for example.com over plain HTTP.
server {
    listen 80;
    listen [::]:80;   # also listen on IPv6
    server_name example.com www.example.com;

    root /var/www/example.com;
    index index.html;

    # Serve static files directly
    # try_files checks: exact file, then directory (serving index), then 404
    location / {
        try_files $uri $uri/ =404;
    }

    # Custom error pages
    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;

    # Deny access to hidden files
    # (matches any path segment starting with "." — .git, .env, .htaccess, ...)
    location ~ /\. {
        deny all;
        access_log off;
        log_not_found off;
    }
}

Location Directives

Location blocks define how Nginx handles requests matching specific URL patterns. Understanding the matching priority is essential:

server {
    # 1. Exact match (highest priority)
    location = /health {
        # default_type sets the Content-Type of this generated response.
        # (add_header would append a *second* Content-Type header alongside
        # the default one rather than replacing it.)
        default_type text/plain;
        return 200 'OK';
    }

    # 2. Preferential prefix match (^~) — if this prefix matches,
    # regex locations are not evaluated at all
    location ^~ /static/ {
        root /var/www;
        expires 30d;
    }

    # 3. Regular expression match (~* for case-insensitive)
    location ~* \.(jpg|jpeg|png|gif|ico|svg|woff2)$ {
        root /var/www;
        expires 365d;
        add_header Cache-Control "public, immutable";
    }

    # 4. Prefix match (lowest priority)
    location / {
        proxy_pass http://backend;
    }

    # Named location for internal redirects (e.g. try_files ... @fallback)
    location @fallback {
        proxy_pass http://backup_backend;
    }
}
| Modifier | Type                   | Priority    | Example                    |
|----------|------------------------|-------------|----------------------------|
| `=`      | Exact match            | 1 (highest) | `location = /favicon.ico`  |
| `^~`     | Preferential prefix    | 2           | `location ^~ /images/`     |
| `~`      | Case-sensitive regex   | 3           | `location ~ \.php$`        |
| `~*`     | Case-insensitive regex | 3           | `location ~* \.(jpg\|png)$`|
| (none)   | Prefix match           | 4 (lowest)  | `location /api/`           |

Reverse Proxy Configuration

The most common use of Nginx in a self-hosted environment is as a reverse proxy, forwarding requests to backend services (Docker containers, application servers, etc.):

# Basic reverse proxy
# Forwards all traffic for grafana.example.com to a local service on :3000.
server {
    listen 80;
    server_name grafana.example.com;

    location / {
        proxy_pass http://localhost:3000;
        # Forward the original request context so the backend can see
        # the real hostname, client IP, and scheme (http/https)
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Timeouts (connect to backend / send request / wait for response)
        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        # Buffering (nginx reads the backend response into buffers and
        # frees the backend connection quickly; disable for streaming/SSE)
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
    }
}

Load Balancing

Nginx can distribute traffic across multiple backend servers:

# Define an upstream group
upstream app_backend {
    # Round-robin (default).
    # max_fails/fail_timeout are passive health checks: after 3 failed
    # attempts the server is skipped for 30s.
    # NOTE: list each server exactly once — a duplicate entry would make
    # nginx treat it as two servers and send it a double share of traffic.
    server 192.168.1.101:8080 max_fails=3 fail_timeout=30s;
    server 192.168.1.102:8080;
    server 192.168.1.103:8080;

    # Weighted distribution
    # server 192.168.1.101:8080 weight=3;
    # server 192.168.1.102:8080 weight=1;

    # Least connections
    # least_conn;

    # IP hash (sticky sessions)
    # ip_hash;

    server 192.168.1.104:8080 backup;  # Only used if others are down

    # Keep connections alive to backends
    # (requires HTTP/1.1 and a cleared Connection header — set below)
    keepalive 32;
}

server {
    listen 80;
    server_name app.example.com;

    location / {
        proxy_pass http://app_backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
    }
}

SSL/TLS Configuration

# HTTPS server block with modern TLS settings
server {
    listen 80;
    server_name example.com;
    # Redirect all HTTP to HTTPS. $host preserves the hostname the client
    # actually requested (safer than $server_name when a block has
    # multiple or wildcard server names).
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    # Since nginx 1.25.1 HTTP/2 has its own directive; the older
    # "listen 443 ssl http2;" form is deprecated. On pre-1.25.1 builds
    # (e.g. distro packages), keep the old listen-parameter form instead.
    http2 on;
    server_name example.com;

    # Certificate files
    ssl_certificate     /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # Protocol versions
    ssl_protocols TLSv1.2 TLSv1.3;

    # Cipher suites (applies to TLS 1.2; TLS 1.3 suites are built in)
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;

    # Session caching (each 1MB of shared cache holds ~4000 sessions)
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 1d;
    ssl_session_tickets off;

    # OCSP Stapling (needs a resolver to fetch OCSP responses)
    ssl_stapling on;
    ssl_stapling_verify on;
    resolver 1.1.1.1 1.0.0.1 valid=300s;
    resolver_timeout 5s;

    # DH parameters (generate with: openssl dhparam -out /etc/nginx/dhparam.pem 2048)
    ssl_dhparam /etc/nginx/dhparam.pem;
}

Rate Limiting

Protect your services from abuse and brute-force attacks:

# Define rate limit zones in the http context
# (zones must be declared at http level; they are applied per-location below)
http {
    # 10 requests per second per IP
    # $binary_remote_addr is compact (4 bytes per IPv4) — a 10m zone
    # tracks roughly 160k distinct addresses
    limit_req_zone $binary_remote_addr zone=general:10m rate=10r/s;

    # 3 requests per minute for login endpoints
    limit_req_zone $binary_remote_addr zone=login:10m rate=3r/m;

    # 100 connections per IP
    limit_conn_zone $binary_remote_addr zone=addr:10m;
}

server {
    # Apply general rate limit with burst
    # burst=20 absorbs short spikes; nodelay serves queued requests
    # immediately instead of pacing them at the zone rate
    location / {
        limit_req zone=general burst=20 nodelay;
        limit_conn addr 100;
        proxy_pass http://backend;
    }

    # Strict rate limit on authentication endpoints
    location /api/login {
        limit_req zone=login burst=5;
        limit_req_status 429;   # rejected requests get 429 (default is 503)
        proxy_pass http://backend;
    }
}

Caching

# Define a cache zone
# levels=1:2       -> two-level directory hashing (avoids one huge flat dir)
# keys_zone=...:10m -> shared memory for keys (~8000 keys per MB)
# inactive=60m     -> entries unused for 60m are evicted even if still valid
# use_temp_path=off -> write cache files in place (skips a rename/copy)
proxy_cache_path /var/cache/nginx/proxy levels=1:2 keys_zone=proxy_cache:10m
                 max_size=1g inactive=60m use_temp_path=off;

server {
    location / {
        proxy_pass http://backend;
        proxy_cache proxy_cache;

        # Cache successful responses for 10 minutes
        proxy_cache_valid 200 10m;
        proxy_cache_valid 404 1m;

        # Add cache status header for debugging (HIT/MISS/BYPASS/STALE...)
        add_header X-Cache-Status $upstream_cache_status;

        # Cache key
        proxy_cache_key "$scheme$request_method$host$request_uri";

        # Bypass cache for authenticated requests
        # (bypass = skip lookup; no_cache = don't store the response)
        proxy_cache_bypass $cookie_session $http_authorization;
        proxy_no_cache $cookie_session $http_authorization;

        # Serve stale content while revalidating
        proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
        proxy_cache_background_update on;
        proxy_cache_lock on;   # collapse concurrent misses into one upstream fetch
    }

    # Static asset caching with browser cache headers
    location ~* \.(css|js|jpg|jpeg|png|gif|ico|svg|woff2)$ {
        root /var/www/static;
        expires 30d;
        add_header Cache-Control "public, immutable";
        access_log off;
    }
}

Security Headers

# Add to your server block or a shared snippet
# Security headers
add_header X-Content-Type-Options "nosniff" always;
# Set X-Frame-Options exactly once: SAMEORIGIN permits same-site framing,
# DENY forbids framing entirely. Sending both conflicting values makes
# browser behavior undefined — pick one.
add_header X-Frame-Options "SAMEORIGIN" always;
# X-XSS-Protection is deprecated and ignored by modern browsers; the CSP
# below is the effective defense. Kept only for legacy clients.
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data:;" always;
add_header Permissions-Policy "camera=(), microphone=(), geolocation=()" always;
# Only send HSTS on HTTPS server blocks; "preload" is a long-term commitment
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;

# Hide Nginx version in the Server header and on error pages
server_tokens off;

WebSocket Proxy

WebSocket connections require special proxy configuration because they use the HTTP Upgrade mechanism:

# WebSocket-aware reverse proxy
# Map the client's Upgrade header to the Connection header sent upstream:
# if the client requested an upgrade, forward it; otherwise send "close"
# so plain HTTP requests are not treated as upgrade attempts.
map $http_upgrade $connection_upgrade {
    default upgrade;
    ''      close;
}

server {
    listen 443 ssl http2;
    server_name app.example.com;

    location / {
        proxy_pass http://localhost:8080;
        # The WebSocket handshake requires HTTP/1.1 plus the
        # Upgrade/Connection header pair to reach the backend intact
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Longer timeouts for WebSocket connections
        # (otherwise idle sockets are dropped after proxy_read_timeout)
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
    }
}
Tip: Many self-hosted applications use WebSockets for real-time features: terminal emulators, chat applications, monitoring dashboards, and container management tools like usulnet. If a service works over HTTP but features like live logs or real-time updates do not work, the WebSocket proxy configuration is likely missing.

Gzip Compression

# Enable gzip compression in nginx.conf http context
gzip on;
gzip_vary on;          # add "Vary: Accept-Encoding" for caches
gzip_proxied any;      # also compress responses to proxied requests
gzip_comp_level 4;     # 1-9; 4 balances CPU cost vs. ratio
gzip_min_length 256;   # skip tiny responses where gzip overhead dominates
# NOTE: text/html is always compressed when gzip is on and must NOT be
# listed below (nginx logs a "duplicate MIME type" warning if it is)
gzip_types
    application/atom+xml
    application/javascript
    application/json
    application/ld+json
    application/manifest+json
    application/rss+xml
    application/vnd.geo+json
    application/vnd.ms-fontobject
    application/x-font-ttf
    application/x-web-app-manifest+json
    application/xhtml+xml
    application/xml
    font/opentype
    image/bmp
    image/svg+xml
    image/x-icon
    text/cache-manifest
    text/css
    text/plain
    text/vcard
    text/vnd.rim.location.xloc
    text/vtt
    text/x-component
    text/x-cross-domain-policy
    text/xml;

Performance Tuning

# /etc/nginx/nginx.conf - Performance-tuned configuration
user www-data;
worker_processes auto;         # One worker per CPU core
worker_rlimit_nofile 65535;    # Max open files per worker
pid /run/nginx.pid;

events {
    worker_connections 4096;   # Max connections per worker
    multi_accept on;           # Accept multiple connections at once
    use epoll;                 # Efficient event model on Linux
}

http {
    # Basic settings
    sendfile on;               # kernel-space file transfers for static files
    tcp_nopush on;             # send headers + file start in one packet
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;
    types_hash_max_size 2048;
    server_tokens off;
    client_max_body_size 100m; # raise/lower to match your largest upload

    # Buffer sizes
    client_body_buffer_size 16k;
    client_header_buffer_size 1k;
    large_client_header_buffers 4 8k;

    # Timeouts (seconds) — short values shed slow/stalled clients
    client_body_timeout 12;
    client_header_timeout 12;
    send_timeout 10;

    # Open file cache — caches descriptors/metadata for hot static files
    # (cached entries can briefly serve deleted/replaced files)
    open_file_cache max=65535 inactive=60s;
    open_file_cache_valid 80s;
    open_file_cache_min_uses 1;

    # Logging (buffered writes reduce disk I/O under load)
    access_log /var/log/nginx/access.log combined buffer=16k flush=5s;
    error_log /var/log/nginx/error.log warn;
}
Warning: Always test configuration changes with nginx -t before reloading. A syntax error in any configuration file will prevent Nginx from starting, potentially taking down all your services. Use nginx -s reload for zero-downtime configuration updates -- never restart the service unless absolutely necessary.

Docker Integration

# Nginx as reverse proxy in Docker Compose
services:
  nginx:
    image: nginx:alpine
    container_name: nginx-proxy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # Config and certs are mounted read-only (:ro) so the container
      # cannot modify them; the cache volume must stay writable
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./conf.d:/etc/nginx/conf.d:ro
      - /etc/letsencrypt:/etc/letsencrypt:ro
      - nginx_cache:/var/cache/nginx
    networks:
      - proxy

  app:
    image: myapp:latest
    container_name: myapp
    # Shares the "proxy" network, so nginx configs can reach this
    # service as http://myapp:<port> by container name
    networks:
      - proxy

networks:
  proxy:
    driver: bridge

For managing multiple Docker services behind Nginx, platforms like usulnet can integrate with your existing reverse proxy setup, providing built-in proxy configuration management that automatically generates Nginx configs for your containers.