# FireCrawl 本地安装 (FireCrawl Local Installation)

## docker-compose.yaml
name: firecrawl

# Shared service settings; merged into services below via `<<: *common-service`.
# Note: YAML merge is shallow — keys set directly on a service override these.
x-common-service: &common-service
  # NOTE: The prebuilt image is used by default. To build the service
  # locally instead, comment out the image: statement and uncomment build:.
  image: ghcr.io/firecrawl/firecrawl:latest
  # build: apps/api
  ulimits:
    # Raise the open-file limit; crawling opens many sockets concurrently.
    nofile:
      soft: 65535
      hard: 65535
  networks:
    - backend
  extra_hosts:
    # Lets containers reach services running on the Docker host.
    - "host.docker.internal:host-gateway"
  logging:
    driver: "json-file"
    options:
      max-size: "10m"
      max-file: "3"
      compress: "true"
# Shared environment variables; merged into service environments via `<<: *common-env`.
# Values come from the .env file; `${VAR:-default}` supplies a fallback.
x-common-env: &common-env
  REDIS_URL: ${REDIS_URL:-redis://redis:6379}
  # Intentionally falls back to REDIS_URL when no dedicated rate-limit Redis is configured.
  REDIS_RATE_LIMIT_URL: ${REDIS_URL:-redis://redis:6379}
  PLAYWRIGHT_MICROSERVICE_URL: ${PLAYWRIGHT_MICROSERVICE_URL:-http://playwright-service:3000/scrape}
  # PostgreSQL credentials — must match the nuq-postgres service below.
  POSTGRES_USER: ${POSTGRES_USER:-postgres}
  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
  POSTGRES_DB: ${POSTGRES_DB:-postgres}
  POSTGRES_HOST: ${POSTGRES_HOST:-nuq-postgres}
  POSTGRES_PORT: ${POSTGRES_PORT:-5432}
  USE_DB_AUTHENTICATION: ${USE_DB_AUTHENTICATION:-false}
  # Concurrency / resource tuning.
  NUM_WORKERS_PER_QUEUE: ${NUM_WORKERS_PER_QUEUE:-8}
  CRAWL_CONCURRENT_REQUESTS: ${CRAWL_CONCURRENT_REQUESTS:-10}
  MAX_CONCURRENT_JOBS: ${MAX_CONCURRENT_JOBS:-5}
  BROWSER_POOL_SIZE: ${BROWSER_POOL_SIZE:-5}
  # AI features (optional).
  OPENAI_API_KEY: ${OPENAI_API_KEY}
  OPENAI_BASE_URL: ${OPENAI_BASE_URL}
  MODEL_NAME: ${MODEL_NAME}
  MODEL_EMBEDDING_NAME: ${MODEL_EMBEDDING_NAME}
  OLLAMA_BASE_URL: ${OLLAMA_BASE_URL}
  AUTUMN_SECRET_KEY: ${AUTUMN_SECRET_KEY}
  SLACK_WEBHOOK_URL: ${SLACK_WEBHOOK_URL}
  BULL_AUTH_KEY: ${BULL_AUTH_KEY}
  TEST_API_KEY: ${TEST_API_KEY}
  # Supabase (optional; required only when USE_DB_AUTHENTICATION=true).
  SUPABASE_ANON_TOKEN: ${SUPABASE_ANON_TOKEN}
  SUPABASE_URL: ${SUPABASE_URL}
  SUPABASE_SERVICE_TOKEN: ${SUPABASE_SERVICE_TOKEN}
  SELF_HOSTED_WEBHOOK_URL: ${SELF_HOSTED_WEBHOOK_URL}
  LOGGING_LEVEL: ${LOGGING_LEVEL}
  # Outbound proxy (optional).
  PROXY_SERVER: ${PROXY_SERVER}
  PROXY_USERNAME: ${PROXY_USERNAME}
  PROXY_PASSWORD: ${PROXY_PASSWORD}
  # SearXNG backend for the /search API (optional).
  SEARXNG_ENDPOINT: ${SEARXNG_ENDPOINT}
  SEARXNG_ENGINES: ${SEARXNG_ENGINES}
  SEARXNG_CATEGORIES: ${SEARXNG_CATEGORIES}
  # Workers reject new jobs above these usage thresholds (0.0-1.0).
  MAX_CPU: ${MAX_CPU:-0.8}
  MAX_RAM: ${MAX_RAM:-0.8}
  ALLOW_LOCAL_WEBHOOKS: ${ALLOW_LOCAL_WEBHOOKS:-false}
services:
  playwright-service:
    # NOTE: The prebuilt image is used by default. To build the service
    # locally instead, comment out the image: statement and uncomment build:.
    image: ghcr.io/firecrawl/playwright-service:latest
    # build: apps/playwright-service-ts
    environment:
      # Quoted so the compose schema receives a string, not an integer.
      PORT: "3000"
      PROXY_SERVER: ${PROXY_SERVER}
      PROXY_USERNAME: ${PROXY_USERNAME}
      PROXY_PASSWORD: ${PROXY_PASSWORD}
      ALLOW_LOCAL_WEBHOOKS: ${ALLOW_LOCAL_WEBHOOKS:-false}
      BLOCK_MEDIA: ${BLOCK_MEDIA:-false}
      # Configure maximum concurrent pages for Playwright browser instances
      MAX_CONCURRENT_PAGES: ${CRAWL_CONCURRENT_REQUESTS:-10}
    networks:
      - backend
    # Resource limits for Docker Compose (not Swarm)
    cpus: 2.0
    mem_limit: 4G
    memswap_limit: 4G
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
        compress: "true"
    tmpfs:
      # In-memory browser cache; noexec/nosuid for hardening.
      - /tmp/.cache:noexec,nosuid,size=1g

  api:
    # Inherits image, ulimits, networks, extra_hosts and logging from the
    # &common-service anchor (shallow merge; keys set here take precedence).
    <<: *common-service
    environment:
      <<: *common-env
      HOST: "0.0.0.0"
      PORT: ${INTERNAL_PORT:-3002}
      EXTRACT_WORKER_PORT: ${EXTRACT_WORKER_PORT:-3004}
      WORKER_PORT: ${WORKER_PORT:-3005}
      NUQ_RABBITMQ_URL: amqp://rabbitmq:5672
      ENV: local
    depends_on:
      redis:
        condition: service_started
      playwright-service:
        condition: service_started
      rabbitmq:
        # rabbitmq defines a healthcheck, so wait until it reports healthy.
        condition: service_healthy
      nuq-postgres:
        condition: service_started
    ports:
      # Host ${PORT} (default 3002) -> container ${INTERNAL_PORT} (default 3002).
      - "${PORT:-3002}:${INTERNAL_PORT:-3002}"
    command: node dist/src/harness.js --start-docker
    # Resource limits for Docker Compose (not Swarm)
    # Increase if you have more CPU cores/RAM available
    cpus: 4.0
    mem_limit: 8G
    memswap_limit: 8G

  redis:
    # NOTE: If you want to use Valkey (open source) instead of Redis (source available),
    # uncomment the Valkey statement and comment out the Redis statement.
    # Using Valkey with Firecrawl is untested and not guaranteed to work. Use with caution.
    image: redis:alpine
    # image: valkey/valkey:alpine
    networks:
      - backend
    command: redis-server --bind 0.0.0.0
    volumes:
      - redis-data:/data
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "2"
        compress: "true"

  rabbitmq:
    image: rabbitmq:3-management
    networks:
      - backend
    command: rabbitmq-server
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "check_running"]
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 30s
    volumes:
      - rabbitmq-data:/var/lib/rabbitmq
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "2"
        compress: "true"

  nuq-postgres:
    image: ghcr.io/firecrawl/nuq-postgres:latest
    environment:
      # Must stay consistent with the POSTGRES_* values in x-common-env.
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: ${POSTGRES_DB:-postgres}
    networks:
      - backend
    volumes:
      - postgres-data:/var/lib/postgresql/data
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
        compress: "true"
networks:
  # Private bridge network shared by all services above.
  backend:
    driver: bridge

volumes:
  # Named volumes so data survives container recreation.
  redis-data:
    driver: local
  rabbitmq-data:
    driver: local
  postgres-data:
    driver: local
## .env
# ===== Required ENVS ======
# Host-facing port for the API service. docker-compose.yaml maps
# "${PORT:-3002}:${INTERNAL_PORT:-3002}", so with PORT=8019 the API is
# reachable on host port 8019 while the container still listens on 3002.
PORT=8019
HOST=0.0.0.0
# Note: PORT is used by both the main API server and worker liveness check endpoint
# To turn on DB authentication, you need to set up Supabase.
USE_DB_AUTHENTICATION=false
# ===== Optional ENVS ======
## === AI features (JSON format on scrape, /extract API) ===
# Provide your OpenAI API key here to enable AI features
# OPENAI_API_KEY=your_openai_api_key_here
# Experimental: Use Ollama
# OLLAMA_BASE_URL=http://localhost:11434/api
# MODEL_NAME=deepseek-r1:7b
# MODEL_EMBEDDING_NAME=nomic-embed-text
# Experimental: Use any OpenAI-compatible API
# OPENAI_BASE_URL=https://example.com/v1
# OPENAI_API_KEY=your_api_key_here
## === Proxy ===
# PROXY_SERVER can be a full URL (e.g. http://0.1.2.3:1234) or just an IP and port combo (e.g. 0.1.2.3:1234)
# Do not uncomment PROXY_USERNAME and PROXY_PASSWORD if your proxy is unauthenticated
# PROXY_SERVER=
# PROXY_USERNAME=
# PROXY_PASSWORD=
## === /search API ===
# By default, the /search API will use Google search.
# You can specify a SearXNG server with the JSON format enabled, if you'd like to use that instead of direct Google.
# You can also customize the engines and categories parameters, but the defaults should also work just fine.
# SEARXNG_ENDPOINT=http://your.searxng.server
# SEARXNG_ENGINES=
# SEARXNG_CATEGORIES=
## === PostgreSQL Database Configuration ===
# Configure PostgreSQL credentials. These should match the credentials used by the nuq-postgres container.
# If you change these, ensure all three are set consistently.
# Note: nuq-postgres requires using 'postgres' as the database name for proper pg_cron initialization
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=postgres
## === Redis Configuration ===
# These are auto-configured by docker-compose.yaml. You shouldn't need to change them.
# REDIS_URL=redis://redis:6379
# REDIS_RATE_LIMIT_URL=redis://redis:6379
## === Playwright Service ===
# This is auto-configured by docker-compose.yaml. You shouldn't need to change it.
# PLAYWRIGHT_MICROSERVICE_URL=http://playwright-service:3000/scrape
## === Supabase Setup (used to support DB authentication, advanced logging, etc.) ===
# SUPABASE_ANON_TOKEN=
# SUPABASE_URL=
# SUPABASE_SERVICE_TOKEN=
# Use if you've set up authentication and want to test with a real API key
# TEST_API_KEY=
# This key lets you access the queue admin panel. Change this if your deployment is publicly accessible.
BULL_AUTH_KEY=CHANGEME
## === PDF Parsing ===
# Set if you have a llamaparse key you'd like to use to parse pdfs
# LLAMAPARSE_API_KEY=
## === Monitoring ===
# Set if you'd like to send server health status messages to Slack
# SLACK_WEBHOOK_URL=
# Set if you'd like to send posthog events like job logs
# POSTHOG_API_KEY=
# POSTHOG_HOST=
## === System Resource Configuration ===
# Maximum CPU usage threshold (0.0-1.0). Worker will reject new jobs when CPU usage exceeds this value.
# Default: 0.8 (80%)
MAX_CPU=0.8
# Maximum RAM usage threshold (0.0-1.0). Worker will reject new jobs when memory usage exceeds this value.
# Default: 0.8 (80%)
MAX_RAM=0.8
# Number of workers per queue
NUM_WORKERS_PER_QUEUE=8
# Concurrent requests for crawling
CRAWL_CONCURRENT_REQUESTS=10
# Maximum concurrent jobs
MAX_CONCURRENT_JOBS=5
# Browser pool size
BROWSER_POOL_SIZE=5
# Set if you'd like to allow local webhooks to be sent to your self-hosted instance
# ALLOW_LOCAL_WEBHOOKS=true
# Block media in Playwright
# BLOCK_MEDIA=true
# Logging level (DEBUG, INFO, WARN, ERROR)
LOGGING_LEVEL=INFO