Files
StrikePackageGPT/docker-compose.yml
mblanke 486fd38aff feat: Add interactive installer and multi-endpoint LLM load balancing
- Add install.ps1 (PowerShell) and install.sh (Bash) interactive installers
- Support local, networked, and cloud AI providers in installer
- Add multi-Ollama endpoint configuration with high-speed NIC support
- Implement load balancing strategies: round-robin, failover, random
- Update LLM router with endpoint health checking and automatic failover
- Add /endpoints API for monitoring all Ollama instances
- Update docker-compose with OLLAMA_ENDPOINTS and LOAD_BALANCE_STRATEGY
- Rebrand to GooseStrike with custom icon and flag assets
2025-11-28 12:59:45 -05:00

123 lines
3.2 KiB
YAML

---
services:
  # Web Dashboard - Main user interface
  dashboard:
    build:
      context: ./services/dashboard
      dockerfile: Dockerfile
    container_name: strikepackage-dashboard
    ports:
      - "8080:8080"
    environment:
      - HACKGPT_API_URL=http://strikepackage-hackgpt-api:8001
      - LLM_ROUTER_URL=http://strikepackage-llm-router:8000
      - KALI_EXECUTOR_URL=http://strikepackage-kali-executor:8002
    depends_on:
      - hackgpt-api
      - llm-router
      # Dashboard calls the executor directly (KALI_EXECUTOR_URL above),
      # so start it first as well.
      - kali-executor
    networks:
      - strikepackage-net
    restart: unless-stopped

  # HackGPT API - Security-focused API service
  hackgpt-api:
    build:
      context: ./services/hackgpt-api
      dockerfile: Dockerfile
    container_name: strikepackage-hackgpt-api
    ports:
      - "8001:8001"
    environment:
      - LLM_ROUTER_URL=http://strikepackage-llm-router:8000
      - KALI_EXECUTOR_URL=http://strikepackage-kali-executor:8002
    depends_on:
      - llm-router
      - kali-executor
    networks:
      - strikepackage-net
    restart: unless-stopped

  # Kali Executor - Command execution service
  kali-executor:
    build:
      context: ./services/kali-executor
      dockerfile: Dockerfile
    container_name: strikepackage-kali-executor
    ports:
      - "8002:8002"
    environment:
      - KALI_CONTAINER_NAME=strikepackage-kali
    volumes:
      # NOTE(review): mounting the Docker socket grants root-equivalent
      # control of the host's Docker daemon - run only on trusted hosts.
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - kali
    networks:
      - strikepackage-net
    restart: unless-stopped

  # LLM Router - Routes to different LLM providers with load balancing
  llm-router:
    build:
      context: ./services/llm-router
      dockerfile: Dockerfile
    container_name: strikepackage-llm-router
    ports:
      - "8000:8000"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      # Multi-endpoint support: comma-separated URLs
      - OLLAMA_ENDPOINTS=${OLLAMA_ENDPOINTS:-http://192.168.1.50:11434}
      # Legacy single endpoint (fallback)
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://192.168.1.50:11434}
      # Load balancing: round-robin, random, failover
      - LOAD_BALANCE_STRATEGY=${LOAD_BALANCE_STRATEGY:-round-robin}
    networks:
      - strikepackage-net
    restart: unless-stopped

  # Kali Linux - Security tools container
  kali:
    build:
      context: ./services/kali
      dockerfile: Dockerfile
    container_name: strikepackage-kali
    # Keep the container alive with an interactive TTY for exec sessions.
    stdin_open: true
    tty: true
    volumes:
      - kali-workspace:/workspace
      - ./data:/data
    networks:
      - strikepackage-net
    cap_add:
      - NET_ADMIN
      - NET_RAW
    restart: unless-stopped

  # Ollama - Local LLM (disabled - using Dell LLM box at 192.168.1.50)
  # Uncomment to use local Ollama instead
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: strikepackage-ollama
  #   ports:
  #     - "11434:11434"
  #   volumes:
  #     - ollama-models:/root/.ollama
  #   networks:
  #     - strikepackage-net
  #   restart: unless-stopped
  #   # Uncomment for GPU support:
  #   # deploy:
  #   #   resources:
  #   #     reservations:
  #   #       devices:
  #   #         - driver: nvidia
  #   #           count: all
  #   #           capabilities: [gpu]

networks:
  strikepackage-net:
    driver: bridge

volumes:
  kali-workspace:
  ollama-models: