feat: Add interactive installer and multi-endpoint LLM load balancing

- Add install.ps1 (PowerShell) and install.sh (Bash) interactive installers
- Support local, networked, and cloud AI providers in installer
- Add multi-Ollama endpoint configuration with high-speed NIC support
- Implement load balancing strategies: round-robin, failover, random
- Update LLM router with endpoint health checking and automatic failover
- Add /endpoints API for monitoring all Ollama instances
- Update docker-compose with OLLAMA_ENDPOINTS and LOAD_BALANCE_STRATEGY
- Rebrand to GooseStrike with custom icon and flag assets
This commit is contained in:
2025-11-28 12:59:45 -05:00
parent b9428df6df
commit 486fd38aff
7 changed files with 1155 additions and 29 deletions

View File

@@ -54,7 +54,7 @@ services:
- strikepackage-net - strikepackage-net
restart: unless-stopped restart: unless-stopped
# LLM Router - Routes to different LLM providers # LLM Router - Routes to different LLM providers with load balancing
llm-router: llm-router:
build: build:
context: ./services/llm-router context: ./services/llm-router
@@ -65,7 +65,12 @@ services:
environment: environment:
- OPENAI_API_KEY=${OPENAI_API_KEY:-} - OPENAI_API_KEY=${OPENAI_API_KEY:-}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-} - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
# Multi-endpoint support: comma-separated URLs
- OLLAMA_ENDPOINTS=${OLLAMA_ENDPOINTS:-http://192.168.1.50:11434}
# Legacy single endpoint (fallback)
- OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://192.168.1.50:11434} - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://192.168.1.50:11434}
# Load balancing: round-robin, random, failover
- LOAD_BALANCE_STRATEGY=${LOAD_BALANCE_STRATEGY:-round-robin}
networks: networks:
- strikepackage-net - strikepackage-net
restart: unless-stopped restart: unless-stopped

623
scripts/install.ps1 Normal file
View File

@@ -0,0 +1,623 @@
<#
.SYNOPSIS
GooseStrike Installation Script
.DESCRIPTION
Interactive installer for GooseStrike AI-Powered Penetration Testing Platform
Configures local, networked, and cloud AI backends
#>
param(
[switch]$Unattended,
[string]$ConfigFile
)
$ErrorActionPreference = "Stop"
# Colors and formatting
function Write-Header {
    # Print a section banner: a white title framed by two red rules.
    param([string]$Text)
    $rule = "═══════════════════════════════════════════════════════════════"
    Write-Host ""
    Write-Host $rule -ForegroundColor Red
    Write-Host " $Text" -ForegroundColor White
    Write-Host $rule -ForegroundColor Red
    Write-Host ""
}
function Write-Step {
    # Emit a cyan "[*]" progress line to the console.
    param([string]$Text)
    $line = " [*] $Text"
    Write-Host $line -ForegroundColor Cyan
}
function Write-Success {
    # Emit a green "[✓]" success line to the console.
    param([string]$Text)
    $line = " [✓] $Text"
    Write-Host $line -ForegroundColor Green
}
# Emit a yellow "[!]" warning line.
# NOTE(review): this shadows the built-in Write-Warning cmdlet for the rest of
# the script, so $WarningPreference / -WarningAction no longer apply — confirm
# that is intended.
function Write-Warning {
param([string]$Text)
Write-Host " [!] $Text" -ForegroundColor Yellow
}
# Emit a red "[✗]" error line.
# NOTE(review): this shadows the built-in Write-Error cmdlet. With
# $ErrorActionPreference = "Stop" the built-in would throw; this override only
# prints, so callers expecting terminating behavior will not get it.
function Write-Error {
param([string]$Text)
Write-Host " [✗] $Text" -ForegroundColor Red
}
function Get-UserChoice {
    <#
    .SYNOPSIS
    Display a numbered menu and return the user's 1-based selection.
    .DESCRIPTION
    Single-select (default) returns one [int]. With -MultiSelect $true it
    returns an array of [int] parsed from comma-separated input, or every
    index (1..Count) when the user enters 'all'.
    #>
    param(
        [string]$Prompt,
        [string[]]$Options,
        [bool]$MultiSelect = $false
    )
    Write-Host ""
    Write-Host " $Prompt" -ForegroundColor White
    Write-Host ""
    for ($i = 0; $i -lt $Options.Count; $i++) {
        Write-Host " [$($i + 1)] $($Options[$i])" -ForegroundColor Gray
    }
    if ($MultiSelect) {
        Write-Host ""
        Write-Host " Enter numbers separated by commas (e.g., 1,2,3) or 'all'" -ForegroundColor DarkGray
        # BUG FIX: the original assigned to $input, a reserved PowerShell
        # automatic variable (the pipeline-input enumerator), which is
        # unreliable to overwrite inside a function. Use a normal local.
        $selection = Read-Host " Selection"
        if ($selection -eq "all") {
            return (1..$Options.Count)
        }
        return $selection.Split(",") | ForEach-Object { [int]$_.Trim() }
    } else {
        Write-Host ""
        $choice = Read-Host " Selection"
        # Non-numeric input throws here ([int] cast); with
        # $ErrorActionPreference = "Stop" the script aborts — same as before.
        return [int]$choice
    }
}
function Test-OllamaEndpoint {
    <#
    .SYNOPSIS
    Return $true when an Ollama server answers GET <Url>/api/tags with HTTP 200.
    #>
    param([string]$Url)
    try {
        # -UseBasicParsing: on Windows PowerShell 5.1, Invoke-WebRequest
        # otherwise depends on the Internet Explorer engine and can fail on
        # Server Core / first-run profiles. It is a no-op on PowerShell 7+.
        $response = Invoke-WebRequest -Uri "$Url/api/tags" -TimeoutSec 5 -UseBasicParsing -ErrorAction Stop
        return $response.StatusCode -eq 200
    } catch {
        # Any failure (refused, timeout, DNS) means the endpoint is unreachable.
        return $false
    }
}
function Get-OllamaModels {
    <#
    .SYNOPSIS
    Return the names of models installed on the Ollama server at <Url>.
    .DESCRIPTION
    Queries <Url>/api/tags and projects the 'name' field of each entry in the
    'models' array. Returns an empty array on any request failure.
    #>
    param([string]$Url)
    try {
        $tags = Invoke-RestMethod -Uri "$Url/api/tags" -TimeoutSec 10
        return $tags.models | ForEach-Object { $_.name }
    } catch {
        return @()
    }
}
# ═══════════════════════════════════════════════════════════════
# MAIN INSTALLATION FLOW
# ═══════════════════════════════════════════════════════════════
Clear-Host
Write-Host @"
AI-Powered Penetration Testing Platform
Installation Wizard
"@ -ForegroundColor Red
Write-Host " Welcome to GooseStrike! This wizard will configure your AI backends." -ForegroundColor White
Write-Host ""
# ═══════════════════════════════════════════════════════════════
# STEP 1: AI PROVIDER SELECTION
# ═══════════════════════════════════════════════════════════════
Write-Header "STEP 1: AI Provider Selection"
# Menu order matters: the $providerChoice -in mappings below must stay in
# sync with this list.
$providerOptions = @(
"Local Only (Ollama on this machine)",
"Networked Only (Ollama on remote machines)",
"Cloud Only (OpenAI, Anthropic, etc.)",
"Hybrid - Local + Networked",
"Hybrid - Local + Cloud",
"Hybrid - Networked + Cloud",
"Full Stack - All providers (Local + Networked + Cloud)"
)
$providerChoice = Get-UserChoice -Prompt "How do you want to run your AI models?" -Options $providerOptions
# Initialize configuration
# Single mutable tree describing every provider; Steps 2-5 fill it in and
# Step 6 serializes it to .env and config/ai-providers.json.
$config = @{
local = @{
enabled = $false
url = "http://localhost:11434"
models = @()
}
networked = @{
enabled = $false
endpoints = @()
}
cloud = @{
enabled = $false
openai = @{ enabled = $false; api_key = ""; models = @("gpt-4", "gpt-4-turbo", "gpt-3.5-turbo") }
anthropic = @{ enabled = $false; api_key = ""; models = @("claude-3-opus-20240229", "claude-3-sonnet-20240229") }
groq = @{ enabled = $false; api_key = ""; models = @("llama-3.1-70b-versatile", "mixtral-8x7b-32768") }
}
default_provider = "ollama"
default_model = "llama3.2"
load_balancing = "round-robin"
}
# Determine which providers to configure based on selection
# (1=Local, 2=Networked, 3=Cloud, 4=L+N, 5=L+C, 6=N+C, 7=all).
$configureLocal = $providerChoice -in @(1, 4, 5, 7)
$configureNetworked = $providerChoice -in @(2, 4, 6, 7)
$configureCloud = $providerChoice -in @(3, 5, 6, 7)
# ═══════════════════════════════════════════════════════════════
# STEP 2: LOCAL OLLAMA CONFIGURATION
# ═══════════════════════════════════════════════════════════════
# Probe the default local Ollama port and record whatever models are pulled.
if ($configureLocal) {
Write-Header "STEP 2: Local Ollama Configuration"
Write-Step "Checking for local Ollama installation..."
$localUrl = "http://localhost:11434"
$ollamaRunning = Test-OllamaEndpoint -Url $localUrl
if ($ollamaRunning) {
Write-Success "Ollama is running at $localUrl"
$models = Get-OllamaModels -Url $localUrl
if ($models.Count -gt 0) {
Write-Success "Found $($models.Count) model(s): $($models -join ', ')"
$config.local.models = $models
} else {
Write-Warning "No models found. You may need to pull models with: ollama pull llama3.2"
}
$config.local.enabled = $true
$config.local.url = $localUrl
} else {
Write-Warning "Ollama not detected at $localUrl"
$installChoice = Read-Host " Would you like to install Ollama? (y/n)"
if ($installChoice -eq "y") {
# Windows has no one-line install: open the download page and let the
# user finish manually, then re-run this installer.
Write-Step "Opening Ollama download page..."
Start-Process "https://ollama.com/download"
Write-Host ""
Write-Host " Please install Ollama and run: ollama pull llama3.2" -ForegroundColor Yellow
Write-Host " Then re-run this installer." -ForegroundColor Yellow
Read-Host " Press Enter to continue anyway, or Ctrl+C to exit"
}
# Local stays disabled when Ollama was not detected, even if the user
# chose to continue.
$config.local.enabled = $false
}
}
# ═══════════════════════════════════════════════════════════════
# STEP 3: NETWORKED OLLAMA CONFIGURATION
# ═══════════════════════════════════════════════════════════════
# Interactive loop: collect any number of remote Ollama endpoints, probe each
# one, and optionally record a second (high-speed NIC) URL per endpoint.
if ($configureNetworked) {
Write-Header "STEP 3: Networked Ollama Configuration"
Write-Host " Configure remote Ollama endpoints (GPU servers, clusters, etc.)" -ForegroundColor White
Write-Host ""
$addMore = $true
$endpointIndex = 1
while ($addMore) {
Write-Host " ── Endpoint #$endpointIndex ──" -ForegroundColor Cyan
Write-Host ""
# Get endpoint details
$name = Read-Host " Friendly name (e.g., 'Dell Pro Max GB10')"
$ip = Read-Host " IP Address (e.g., 192.168.1.50)"
$port = Read-Host " Port (default: 11434)"
if ([string]::IsNullOrEmpty($port)) { $port = "11434" }
# Network interface selection
Write-Host ""
Write-Host " Network interface options:" -ForegroundColor Gray
Write-Host " [1] Primary network (default)" -ForegroundColor Gray
Write-Host " [2] High-speed interface (100GbE, etc.)" -ForegroundColor Gray
$nicChoice = Read-Host " Selection (default: 1)"
$networkInterface = "primary"
$altIp = $null
if ($nicChoice -eq "2") {
$networkInterface = "high-speed"
$altIp = Read-Host " High-speed interface IP (e.g., 10.0.0.50)"
}
# Build endpoint URL
$endpointUrl = "http://${ip}:${port}"
Write-Step "Testing connection to $endpointUrl..."
# One record per endpoint; 'priority' preserves entry order for the
# priority-based load-balancing strategy.
$endpoint = @{
name = $name
url = $endpointUrl
ip = $ip
port = [int]$port
network_interface = $networkInterface
alt_ip = $altIp
alt_url = if ($altIp) { "http://${altIp}:${port}" } else { $null }
enabled = $false
models = @()
priority = $endpointIndex
}
if (Test-OllamaEndpoint -Url $endpointUrl) {
Write-Success "Connected to $name at $endpointUrl"
$models = Get-OllamaModels -Url $endpointUrl
if ($models.Count -gt 0) {
Write-Success "Available models: $($models -join ', ')"
$endpoint.models = $models
}
$endpoint.enabled = $true
# Test alternate interface if configured
if ($altIp) {
Write-Step "Testing high-speed interface at $($endpoint.alt_url)..."
if (Test-OllamaEndpoint -Url $endpoint.alt_url) {
Write-Success "High-speed interface reachable"
$preferHs = Read-Host " Prefer high-speed interface when available? (y/n)"
if ($preferHs -eq "y") {
# NOTE(review): prefer_high_speed only exists on the hashtable when
# the user answered 'y' here; later consumers must treat a missing
# key as $false.
$endpoint.prefer_high_speed = $true
}
} else {
Write-Warning "High-speed interface not reachable (will use primary)"
}
}
} else {
Write-Warning "Could not connect to $endpointUrl"
$keepEndpoint = Read-Host " Add anyway? (y/n)"
if ($keepEndpoint -eq "y") {
$endpoint.enabled = $false
} else {
# Discard the record entirely; nothing is appended below.
$endpoint = $null
}
}
if ($endpoint) {
$config.networked.endpoints += $endpoint
}
Write-Host ""
$addMoreChoice = Read-Host " Add another networked endpoint? (y/n)"
$addMore = $addMoreChoice -eq "y"
$endpointIndex++
}
if ($config.networked.endpoints.Count -gt 0) {
$config.networked.enabled = $true
# Load balancing configuration
# Only meaningful with two or more endpoints.
if ($config.networked.endpoints.Count -gt 1) {
Write-Host ""
Write-Host " Multiple endpoints configured. Select load balancing strategy:" -ForegroundColor White
$lbOptions = @(
"Round-robin (distribute evenly)",
"Priority-based (use highest priority first)",
"Fastest-response (route to quickest endpoint)",
"Model-based (route by model availability)"
)
$lbChoice = Get-UserChoice -Prompt "Load balancing strategy:" -Options $lbOptions
$config.load_balancing = switch ($lbChoice) {
1 { "round-robin" }
2 { "priority" }
3 { "fastest" }
4 { "model-based" }
default { "round-robin" }
}
Write-Success "Load balancing set to: $($config.load_balancing)"
}
}
}
# ═══════════════════════════════════════════════════════════════
# STEP 4: CLOUD PROVIDER CONFIGURATION
# ═══════════════════════════════════════════════════════════════
# Collect API keys for cloud providers. Keys are read masked (SecureString)
# but immediately converted back to plain text, and Step 6 writes them into
# .env in clear text.
if ($configureCloud) {
Write-Header "STEP 4: Cloud Provider Configuration"
Write-Host " Configure cloud AI providers (API keys required)" -ForegroundColor White
Write-Host ""
# OpenAI
Write-Host " ── OpenAI ──" -ForegroundColor Cyan
$useOpenAI = Read-Host " Enable OpenAI? (y/n)"
if ($useOpenAI -eq "y") {
$openaiKey = Read-Host " OpenAI API Key" -AsSecureString
# SecureString -> plain text round-trip (BSTR marshalling).
$openaiKeyPlain = [Runtime.InteropServices.Marshal]::PtrToStringAuto([Runtime.InteropServices.Marshal]::SecureStringToBSTR($openaiKey))
if ($openaiKeyPlain -match "^sk-") {
$config.cloud.openai.enabled = $true
$config.cloud.openai.api_key = $openaiKeyPlain
Write-Success "OpenAI configured"
} else {
Write-Warning "Invalid API key format (should start with 'sk-')"
}
}
Write-Host ""
# Anthropic
Write-Host " ── Anthropic (Claude) ──" -ForegroundColor Cyan
$useAnthropic = Read-Host " Enable Anthropic? (y/n)"
if ($useAnthropic -eq "y") {
$anthropicKey = Read-Host " Anthropic API Key" -AsSecureString
$anthropicKeyPlain = [Runtime.InteropServices.Marshal]::PtrToStringAuto([Runtime.InteropServices.Marshal]::SecureStringToBSTR($anthropicKey))
if ($anthropicKeyPlain -match "^sk-ant-") {
$config.cloud.anthropic.enabled = $true
$config.cloud.anthropic.api_key = $anthropicKeyPlain
Write-Success "Anthropic configured"
} else {
Write-Warning "Invalid API key format (should start with 'sk-ant-')"
}
}
Write-Host ""
# Groq
Write-Host " ── Groq (Fast inference) ──" -ForegroundColor Cyan
$useGroq = Read-Host " Enable Groq? (y/n)"
if ($useGroq -eq "y") {
$groqKey = Read-Host " Groq API Key" -AsSecureString
# NOTE(review): unlike OpenAI/Anthropic, the Groq key gets no format
# check — confirm whether that is intentional.
$groqKeyPlain = [Runtime.InteropServices.Marshal]::PtrToStringAuto([Runtime.InteropServices.Marshal]::SecureStringToBSTR($groqKey))
$config.cloud.groq.enabled = $true
$config.cloud.groq.api_key = $groqKeyPlain
Write-Success "Groq configured"
}
# Cloud is "enabled" when any single cloud provider is.
$config.cloud.enabled = $config.cloud.openai.enabled -or $config.cloud.anthropic.enabled -or $config.cloud.groq.enabled
}
# ═══════════════════════════════════════════════════════════════
# STEP 5: DEFAULT PROVIDER SELECTION
# ═══════════════════════════════════════════════════════════════
# Build the menu of everything configured so far, then record the user's pick
# as the router's default provider. Exits non-zero if nothing was configured.
Write-Header "STEP 5: Default Provider Selection"
$availableProviders = @()
$providerMap = @{}
if ($config.local.enabled) {
$availableProviders += "Local Ollama (localhost)"
$providerMap["Local Ollama (localhost)"] = @{ provider = "ollama"; url = $config.local.url }
}
foreach ($endpoint in $config.networked.endpoints | Where-Object { $_.enabled }) {
$label = "Networked: $($endpoint.name)"
$availableProviders += $label
$providerMap[$label] = @{ provider = "ollama"; url = $endpoint.url; name = $endpoint.name }
}
if ($config.cloud.openai.enabled) {
$availableProviders += "OpenAI (GPT-4)"
$providerMap["OpenAI (GPT-4)"] = @{ provider = "openai" }
}
if ($config.cloud.anthropic.enabled) {
$availableProviders += "Anthropic (Claude)"
$providerMap["Anthropic (Claude)"] = @{ provider = "anthropic" }
}
if ($config.cloud.groq.enabled) {
$availableProviders += "Groq (Fast)"
$providerMap["Groq (Fast)"] = @{ provider = "groq" }
}
if ($availableProviders.Count -gt 0) {
$defaultChoice = Get-UserChoice -Prompt "Select your default AI provider:" -Options $availableProviders
# NOTE(review): $defaultChoice is not range-checked; an out-of-range entry
# indexes past the array ($null) and silently clears default_provider.
$selectedProvider = $availableProviders[$defaultChoice - 1]
$config.default_provider = $providerMap[$selectedProvider].provider
Write-Success "Default provider: $selectedProvider"
} else {
Write-Error "No providers configured! At least one provider is required."
exit 1
}
# ═══════════════════════════════════════════════════════════════
# STEP 6: GENERATE CONFIGURATION FILES
# ═══════════════════════════════════════════════════════════════
# Serialize $config into the project .env (flat key=value) and a JSON dump
# for the router service.
Write-Header "STEP 6: Generating Configuration"
$scriptRoot = Split-Path -Parent $PSScriptRoot
$envFile = Join-Path $scriptRoot ".env"
# BUG FIX: Join-Path accepts only one positional -ChildPath on Windows
# PowerShell 5.1, so the original three-argument call failed there. Nest it.
$configJsonFile = Join-Path (Join-Path $scriptRoot "config") "ai-providers.json"
# Create config directory if needed
$configDir = Join-Path $scriptRoot "config"
if (-not (Test-Path $configDir)) {
    New-Item -ItemType Directory -Path $configDir -Force | Out-Null
}
# Generate .env file
Write-Step "Generating .env file..."
# BUG FIX: here-strings carry no trailing newline, so the original string
# concatenation fused the last key of one section with the first comment of
# the next (e.g. "LOAD_BALANCING_STRATEGY=round-robin# Networked Endpoint 1"),
# corrupting the .env. Build a list of lines and join once instead.
$envLines = @(
    "#",
    "# GooseStrike Configuration",
    "# Generated by installer on $(Get-Date -Format "yyyy-MM-dd HH:mm:ss")",
    "#",
    "# Default AI Provider",
    "DEFAULT_PROVIDER=$($config.default_provider)",
    "DEFAULT_MODEL=$($config.default_model)",
    "# Local Ollama",
    "LOCAL_OLLAMA_ENABLED=$($config.local.enabled.ToString().ToLower())",
    "LOCAL_OLLAMA_URL=$($config.local.url)",
    "# Networked Ollama Endpoints",
    "NETWORKED_OLLAMA_ENABLED=$($config.networked.enabled.ToString().ToLower())",
    "LOAD_BALANCING_STRATEGY=$($config.load_balancing)"
)
# One numbered key group per networked endpoint.
$endpointNum = 1
foreach ($endpoint in $config.networked.endpoints) {
    $envLines += "# Networked Endpoint $endpointNum - $($endpoint.name)"
    $envLines += "OLLAMA_ENDPOINT_${endpointNum}_NAME=$($endpoint.name)"
    $envLines += "OLLAMA_ENDPOINT_${endpointNum}_URL=$($endpoint.url)"
    $envLines += "OLLAMA_ENDPOINT_${endpointNum}_ENABLED=$($endpoint.enabled.ToString().ToLower())"
    $envLines += "OLLAMA_ENDPOINT_${endpointNum}_PRIORITY=$($endpoint.priority)"
    if ($endpoint.alt_url) {
        $envLines += "OLLAMA_ENDPOINT_${endpointNum}_ALT_URL=$($endpoint.alt_url)"
        # BUG FIX: prefer_high_speed is only added to the hashtable when the
        # user opted in during Step 3; calling .ToString() on the missing
        # ($null) key crashed here. [bool] coerces $null to False first.
        $envLines += "OLLAMA_ENDPOINT_${endpointNum}_PREFER_ALT=$(([bool]$endpoint.prefer_high_speed).ToString().ToLower())"
    }
    $endpointNum++
}
$envLines += "OLLAMA_ENDPOINT_COUNT=$($config.networked.endpoints.Count)"
$envLines += ""
# Cloud providers (keys are written in clear text).
$envLines += @(
    "# Cloud Providers",
    "OPENAI_ENABLED=$($config.cloud.openai.enabled.ToString().ToLower())",
    "OPENAI_API_KEY=$($config.cloud.openai.api_key)",
    "ANTHROPIC_ENABLED=$($config.cloud.anthropic.enabled.ToString().ToLower())",
    "ANTHROPIC_API_KEY=$($config.cloud.anthropic.api_key)",
    "GROQ_ENABLED=$($config.cloud.groq.enabled.ToString().ToLower())",
    "GROQ_API_KEY=$($config.cloud.groq.api_key)"
)
# NOTE(review): on Windows PowerShell 5.1, -Encoding UTF8 writes a BOM; some
# .env consumers choke on a BOM — confirm docker-compose tolerates it.
($envLines -join "`n") | Out-File -FilePath $envFile -Encoding UTF8 -Force
Write-Success "Created $envFile"
# Generate JSON config (the full provider tree, for the router service).
Write-Step "Generating AI providers config..."
$config | ConvertTo-Json -Depth 10 | Out-File -FilePath $configJsonFile -Encoding UTF8 -Force
Write-Success "Created $configJsonFile"
# ═══════════════════════════════════════════════════════════════
# STEP 7: DOCKER SETUP
# ═══════════════════════════════════════════════════════════════
# Verify the Docker daemon is reachable and optionally build + start the
# compose stack right away.
Write-Header "STEP 7: Docker Setup"
Write-Step "Checking Docker..."
$dockerRunning = $false
try {
# 'docker version' contacts the daemon, so this checks the engine is up,
# not merely that the CLI is installed. stderr is discarded.
$dockerVersion = docker version --format '{{.Server.Version}}' 2>$null
if ($dockerVersion) {
Write-Success "Docker is running (version $dockerVersion)"
$dockerRunning = $true
}
} catch {
Write-Warning "Docker is not running"
}
if ($dockerRunning) {
$startNow = Read-Host " Start GooseStrike now? (y/n)"
if ($startNow -eq "y") {
Write-Step "Building and starting containers..."
# Run compose from the project root so it finds docker-compose.yml/.env.
Push-Location $scriptRoot
docker-compose up -d --build
Pop-Location
Write-Success "GooseStrike is starting!"
Write-Host ""
Write-Host " Dashboard: http://localhost:8080" -ForegroundColor Green
Write-Host ""
}
} else {
Write-Warning "Please start Docker and run: docker-compose up -d --build"
}
# ═══════════════════════════════════════════════════════════════
# COMPLETE
# ═══════════════════════════════════════════════════════════════
# Final console summary of everything configured; no further state changes.
Write-Header "Installation Complete!"
Write-Host " Configuration Summary:" -ForegroundColor White
Write-Host ""
if ($config.local.enabled) {
Write-Host " ✓ Local Ollama: $($config.local.url)" -ForegroundColor Green
}
foreach ($endpoint in $config.networked.endpoints | Where-Object { $_.enabled }) {
Write-Host " ✓ Networked: $($endpoint.name) @ $($endpoint.url)" -ForegroundColor Green
if ($endpoint.alt_url) {
Write-Host " └─ High-speed: $($endpoint.alt_url)" -ForegroundColor DarkGreen
}
}
if ($config.cloud.openai.enabled) {
Write-Host " ✓ OpenAI: Enabled" -ForegroundColor Green
}
if ($config.cloud.anthropic.enabled) {
Write-Host " ✓ Anthropic: Enabled" -ForegroundColor Green
}
if ($config.cloud.groq.enabled) {
Write-Host " ✓ Groq: Enabled" -ForegroundColor Green
}
Write-Host ""
Write-Host " Default Provider: $($config.default_provider)" -ForegroundColor Cyan
# Load balancing only applies when more than one networked endpoint exists.
if ($config.networked.endpoints.Count -gt 1) {
Write-Host " Load Balancing: $($config.load_balancing)" -ForegroundColor Cyan
}
Write-Host ""
Write-Host " Files created:" -ForegroundColor White
Write-Host " - $envFile" -ForegroundColor Gray
Write-Host " - $configJsonFile" -ForegroundColor Gray
Write-Host ""
Write-Host " To start GooseStrike:" -ForegroundColor White
Write-Host " cd $scriptRoot" -ForegroundColor Gray
Write-Host " docker-compose up -d" -ForegroundColor Gray
Write-Host ""
Write-Host " Dashboard: http://localhost:8080" -ForegroundColor Green
Write-Host ""

371
scripts/install.sh Normal file
View File

@@ -0,0 +1,371 @@
#!/bin/bash
#
# GooseStrike Installation Script
# Interactive installer for AI-Powered Penetration Testing Platform
#
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
WHITE='\033[1;37m'
GRAY='\033[0;90m'
NC='\033[0m' # No Color
# Functions
print_header() {
    # Section banner: a white title framed by two red rules.
    local rule="═══════════════════════════════════════════════════════════════"
    echo ""
    echo -e "${RED}${rule}${NC}"
    echo -e "${WHITE} $1${NC}"
    echo -e "${RED}${rule}${NC}"
    echo ""
}
print_step() {
    # Cyan "[*]" progress line.
    local msg="$1"
    echo -e "${CYAN} [*] ${msg}${NC}"
}
print_success() {
    # Green "[✓]" success line.
    local msg="$1"
    echo -e "${GREEN} [✓] ${msg}${NC}"
}
print_warning() {
    # Yellow "[!]" warning line.
    local msg="$1"
    echo -e "${YELLOW} [!] ${msg}${NC}"
}
print_error() {
    # Red "[✗]" error line.
    local msg="$1"
    echo -e "${RED} [✗] ${msg}${NC}"
}
test_ollama_endpoint() {
    # Exit 0 when the Ollama API at "$1" answers GET /api/tags within 5s.
    local base_url="$1"
    # The function's status is curl's status; an explicit `return $?` is
    # redundant.
    curl -s --connect-timeout 5 "${base_url}/api/tags" > /dev/null 2>&1
}
get_ollama_models() {
# Print one installed model name per line from the Ollama API at "$1";
# prints an empty line on any failure.
# NOTE(review): depends on jq, but the installer never checks it is
# installed — confirm jq is a documented prerequisite.
local url=$1
curl -s --connect-timeout 10 "${url}/api/tags" 2>/dev/null | jq -r '.models[].name' 2>/dev/null || echo ""
}
# Get script directory
# PROJECT_ROOT is the parent of scripts/, i.e. where .env and
# docker-compose.yml live.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Display banner
clear
echo -e "${RED}"
cat << "EOF"
╔═══════════════════════════════════════════════════════════════╗
║ ║
║ ██████╗ ██████╗ ██████╗ ███████╗███████╗ ║
║ ██╔════╝ ██╔═══██╗██╔═══██╗██╔════╝██╔════╝ ║
║ ██║ ███╗██║ ██║██║ ██║███████╗█████╗ ║
║ ██║ ██║██║ ██║██║ ██║╚════██║██╔══╝ ║
║ ╚██████╔╝╚██████╔╝╚██████╔╝███████║███████╗ ║
║ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝ ║
║ ║
║ ███████╗████████╗██████╗ ██╗██╗ ██╗███████╗ ║
║ ██╔════╝╚══██╔══╝██╔══██╗██║██║ ██╔╝██╔════╝ ║
║ ███████╗ ██║ ██████╔╝██║█████╔╝ █████╗ ║
║ ╚════██║ ██║ ██╔══██╗██║██╔═██╗ ██╔══╝ ║
║ ███████║ ██║ ██║ ██║██║██║ ██╗███████╗ ║
║ ╚══════╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚═╝ ╚═╝╚══════╝ ║
║ ║
║ AI-Powered Penetration Testing Platform ║
║ Installation Wizard ║
╚═══════════════════════════════════════════════════════════════╝
EOF
echo -e "${NC}"
echo -e "${WHITE} Welcome to GooseStrike! This wizard will configure your AI backends.${NC}"
echo ""
# ═══════════════════════════════════════════════════════════════
# STEP 1: AI PROVIDER SELECTION
# ═══════════════════════════════════════════════════════════════
print_header "STEP 1: AI Provider Selection"
echo -e "${WHITE} How do you want to run your AI models?${NC}"
echo ""
echo -e "${GRAY} [1] Local Only (Ollama on this machine)${NC}"
echo -e "${GRAY} [2] Networked Only (Ollama on remote machines)${NC}"
echo -e "${GRAY} [3] Cloud Only (OpenAI, Anthropic, etc.)${NC}"
echo -e "${GRAY} [4] Hybrid - Local + Networked${NC}"
echo -e "${GRAY} [5] Hybrid - Local + Cloud${NC}"
echo -e "${GRAY} [6] Hybrid - Networked + Cloud${NC}"
echo -e "${GRAY} [7] Full Stack - All providers${NC}"
echo ""
read -p " Selection: " provider_choice
# Initialize configuration
# Defaults; later steps flip these based on probes and user answers.
LOCAL_ENABLED=false
LOCAL_URL="http://localhost:11434"
NETWORKED_ENABLED=false
NETWORKED_ENDPOINTS=()
CLOUD_ENABLED=false
OPENAI_ENABLED=false
OPENAI_API_KEY=""
ANTHROPIC_ENABLED=false
ANTHROPIC_API_KEY=""
DEFAULT_PROVIDER="ollama"
LOAD_BALANCE_STRATEGY="round-robin"
# Determine what to configure
# Menu indices must stay in sync with the list printed above; anything
# unrecognized falls back to local-only.
case $provider_choice in
1) CONFIGURE_LOCAL=true; CONFIGURE_NETWORKED=false; CONFIGURE_CLOUD=false ;;
2) CONFIGURE_LOCAL=false; CONFIGURE_NETWORKED=true; CONFIGURE_CLOUD=false ;;
3) CONFIGURE_LOCAL=false; CONFIGURE_NETWORKED=false; CONFIGURE_CLOUD=true ;;
4) CONFIGURE_LOCAL=true; CONFIGURE_NETWORKED=true; CONFIGURE_CLOUD=false ;;
5) CONFIGURE_LOCAL=true; CONFIGURE_NETWORKED=false; CONFIGURE_CLOUD=true ;;
6) CONFIGURE_LOCAL=false; CONFIGURE_NETWORKED=true; CONFIGURE_CLOUD=true ;;
7) CONFIGURE_LOCAL=true; CONFIGURE_NETWORKED=true; CONFIGURE_CLOUD=true ;;
*) CONFIGURE_LOCAL=true; CONFIGURE_NETWORKED=false; CONFIGURE_CLOUD=false ;;
esac
# ═══════════════════════════════════════════════════════════════
# STEP 2: LOCAL OLLAMA CONFIGURATION
# ═══════════════════════════════════════════════════════════════
# Probe the default local Ollama port; optionally run the official installer
# and start the service when it is missing.
if [ "$CONFIGURE_LOCAL" = true ]; then
    print_header "STEP 2: Local Ollama Configuration"
    print_step "Checking for local Ollama installation..."
    if test_ollama_endpoint "$LOCAL_URL"; then
        print_success "Ollama is running at $LOCAL_URL"
        models=$(get_ollama_models "$LOCAL_URL")
        if [ -n "$models" ]; then
            # BUG FIX: the original `echo $models | tr '\n' ', '` never saw a
            # newline — the unquoted expansion had already collapsed newlines
            # to spaces — so models printed space-separated, and tr's second
            # set was silently truncated to ','. Join explicitly instead.
            print_success "Found models: ${models//$'\n'/, }"
        else
            print_warning "No models found. Run: ollama pull llama3.2"
        fi
        LOCAL_ENABLED=true
    else
        print_warning "Ollama not detected at $LOCAL_URL"
        read -p " Would you like to install Ollama? (y/n): " install_choice
        if [ "$install_choice" = "y" ]; then
            print_step "Installing Ollama..."
            # Official install script; needs network access (and typically sudo).
            curl -fsSL https://ollama.com/install.sh | sh
            print_step "Starting Ollama service..."
            # Background the server and give it a moment before re-probing.
            ollama serve &
            sleep 3
            if test_ollama_endpoint "$LOCAL_URL"; then
                print_success "Ollama installed and running"
                LOCAL_ENABLED=true
            fi
        fi
    fi
fi
# ═══════════════════════════════════════════════════════════════
# STEP 3: NETWORKED OLLAMA CONFIGURATION
# ═══════════════════════════════════════════════════════════════
# Interactive loop: collect any number of remote Ollama endpoint URLs and,
# when more than one was added, ask for a load-balancing strategy.
if [ "$CONFIGURE_NETWORKED" = true ]; then
    print_header "STEP 3: Networked Ollama Configuration"
    echo -e "${WHITE} Configure remote Ollama endpoints${NC}"
    echo ""
    add_more=true
    endpoint_num=1
    while [ "$add_more" = true ]; do
        echo -e "${CYAN} ── Endpoint #$endpoint_num ──${NC}"
        echo ""
        read -p " Friendly name (e.g., 'Dell PowerEdge'): " ep_name
        read -p " IP Address: " ep_ip
        read -p " Port (default: 11434): " ep_port
        ep_port=${ep_port:-11434}
        ep_url="http://${ep_ip}:${ep_port}"
        print_step "Testing connection to $ep_url..."
        if test_ollama_endpoint "$ep_url"; then
            print_success "Connected to $ep_name"
            NETWORKED_ENDPOINTS+=("$ep_url")
            NETWORKED_ENABLED=true
        else
            print_warning "Could not connect to $ep_url"
            # An unreachable endpoint can still be recorded (e.g. a server
            # that is currently powered off).
            read -p " Add anyway? (y/n): " keep_ep
            if [ "$keep_ep" = "y" ]; then
                NETWORKED_ENDPOINTS+=("$ep_url")
            fi
        fi
        echo ""
        read -p " Add another endpoint? (y/n): " add_more_choice
        [ "$add_more_choice" != "y" ] && add_more=false
        # BUG FIX: under `set -e`, ((endpoint_num++)) aborts the script
        # whenever the pre-increment value is 0 (an arithmetic result of 0 is
        # a failure status). It only worked because the counter starts at 1;
        # use an assignment form that never reports failure.
        endpoint_num=$((endpoint_num + 1))
    done
    # Strategy prompt is only meaningful with two or more endpoints.
    if [ ${#NETWORKED_ENDPOINTS[@]} -gt 1 ]; then
        echo ""
        echo -e "${WHITE} Load balancing strategy:${NC}"
        echo -e "${GRAY} [1] Round-robin${NC}"
        echo -e "${GRAY} [2] Failover (priority-based)${NC}"
        echo -e "${GRAY} [3] Random${NC}"
        read -p " Selection: " lb_choice
        case $lb_choice in
            1) LOAD_BALANCE_STRATEGY="round-robin" ;;
            2) LOAD_BALANCE_STRATEGY="failover" ;;
            3) LOAD_BALANCE_STRATEGY="random" ;;
        esac
    fi
fi
# ═══════════════════════════════════════════════════════════════
# STEP 4: CLOUD PROVIDER CONFIGURATION
# ═══════════════════════════════════════════════════════════════
# Collect cloud API keys (read with -s so they are not echoed); Step 5 writes
# them into .env in clear text.
if [ "$CONFIGURE_CLOUD" = true ]; then
    print_header "STEP 4: Cloud Provider Configuration"
    # OpenAI
    echo -e "${CYAN} ── OpenAI ──${NC}"
    read -p " Enable OpenAI? (y/n): " use_openai
    if [ "$use_openai" = "y" ]; then
        read -sp " OpenAI API Key: " openai_key
        echo ""
        if [[ "$openai_key" == sk-* ]]; then
            OPENAI_ENABLED=true
            OPENAI_API_KEY="$openai_key"
            print_success "OpenAI configured"
        else
            print_warning "Invalid API key format"
        fi
    fi
    echo ""
    # Anthropic
    echo -e "${CYAN} ── Anthropic (Claude) ──${NC}"
    read -p " Enable Anthropic? (y/n): " use_anthropic
    if [ "$use_anthropic" = "y" ]; then
        read -sp " Anthropic API Key: " anthropic_key
        echo ""
        if [[ "$anthropic_key" == sk-ant-* ]]; then
            ANTHROPIC_ENABLED=true
            ANTHROPIC_API_KEY="$anthropic_key"
            print_success "Anthropic configured"
        else
            print_warning "Invalid API key format"
        fi
    fi
    # && / || are left-associative with equal precedence, so this evaluates as
    # (([openai] || [anthropic]) && echo true) || echo false — i.e. "true"
    # when either provider was enabled.
    CLOUD_ENABLED=$( [ "$OPENAI_ENABLED" = true ] || [ "$ANTHROPIC_ENABLED" = true ] && echo true || echo false )
fi
# ═══════════════════════════════════════════════════════════════
# STEP 5: GENERATE CONFIGURATION
# ═══════════════════════════════════════════════════════════════
print_header "STEP 5: Generating Configuration"
# Build OLLAMA_ENDPOINTS string
# Comma-joined list consumed by the llm-router's OLLAMA_ENDPOINTS variable;
# falls back to the local URL, then to localhost.
if [ ${#NETWORKED_ENDPOINTS[@]} -gt 0 ]; then
OLLAMA_ENDPOINTS_STR=$(IFS=,; echo "${NETWORKED_ENDPOINTS[*]}")
elif [ "$LOCAL_ENABLED" = true ]; then
OLLAMA_ENDPOINTS_STR="$LOCAL_URL"
else
OLLAMA_ENDPOINTS_STR="http://localhost:11434"
fi
# Generate .env file
# Unquoted EOF so the $(...) and ${...} references expand; API keys end up in
# the file in clear text.
# NOTE(review): DEFAULT_PROVIDER is never changed from its initial "ollama",
# even for cloud-only installs — confirm whether that is intended.
print_step "Generating .env file..."
cat > "$PROJECT_ROOT/.env" << EOF
# ═══════════════════════════════════════════════════════════════
# GooseStrike Configuration
# Generated on $(date)
# ═══════════════════════════════════════════════════════════════
# Ollama Configuration
OLLAMA_ENDPOINTS=${OLLAMA_ENDPOINTS_STR}
LOAD_BALANCE_STRATEGY=${LOAD_BALANCE_STRATEGY}
# Cloud Providers
OPENAI_API_KEY=${OPENAI_API_KEY}
ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
# Default Settings
DEFAULT_PROVIDER=${DEFAULT_PROVIDER}
DEFAULT_MODEL=llama3.2
EOF
print_success "Created $PROJECT_ROOT/.env"
# ═══════════════════════════════════════════════════════════════
# STEP 6: DOCKER SETUP
# ═══════════════════════════════════════════════════════════════
# Verify the Docker daemon is reachable and optionally build + start the
# compose stack now.
print_header "STEP 6: Docker Setup"
print_step "Checking Docker..."
# 'docker version' contacts the daemon, so this checks the engine is running,
# not merely that the CLI exists.
if docker version > /dev/null 2>&1; then
docker_version=$(docker version --format '{{.Server.Version}}')
print_success "Docker is running (version $docker_version)"
read -p " Start GooseStrike now? (y/n): " start_now
if [ "$start_now" = "y" ]; then
print_step "Building and starting containers..."
# Compose must run from the project root so it picks up the .env above.
cd "$PROJECT_ROOT"
docker-compose up -d --build
print_success "GooseStrike is starting!"
echo ""
echo -e "${GREEN} Dashboard: http://localhost:8080${NC}"
echo ""
fi
else
print_warning "Docker is not running"
echo " Please start Docker and run: docker-compose up -d --build"
fi
# ═══════════════════════════════════════════════════════════════
# COMPLETE
# ═══════════════════════════════════════════════════════════════
# Final console summary; no further state changes after this point.
print_header "Installation Complete!"
echo -e "${WHITE} Configuration Summary:${NC}"
echo ""
if [ "$LOCAL_ENABLED" = true ]; then
echo -e "${GREEN} ✓ Local Ollama: $LOCAL_URL${NC}"
fi
for ep in "${NETWORKED_ENDPOINTS[@]}"; do
echo -e "${GREEN} ✓ Networked: $ep${NC}"
done
if [ "$OPENAI_ENABLED" = true ]; then
echo -e "${GREEN} ✓ OpenAI: Enabled${NC}"
fi
if [ "$ANTHROPIC_ENABLED" = true ]; then
echo -e "${GREEN} ✓ Anthropic: Enabled${NC}"
fi
echo ""
echo -e "${CYAN} Load Balancing: $LOAD_BALANCE_STRATEGY${NC}"
echo ""
echo -e "${WHITE} To start GooseStrike:${NC}"
echo -e "${GRAY} cd $PROJECT_ROOT${NC}"
echo -e "${GRAY} docker-compose up -d${NC}"
echo ""
echo -e "${GREEN} Dashboard: http://localhost:8080${NC}"
echo ""

Binary file not shown.

After

Width:  |  Height:  |  Size: 310 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

View File

@@ -3,7 +3,7 @@
<head> <head>
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>StrikePackageGPT - Security Analysis Dashboard</title> <title>GooseStrike - Security Analysis Dashboard</title>
<script src="https://cdn.tailwindcss.com"></script> <script src="https://cdn.tailwindcss.com"></script>
<script src="https://unpkg.com/alpinejs@3.x.x/dist/cdn.min.js" defer></script> <script src="https://unpkg.com/alpinejs@3.x.x/dist/cdn.min.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script> <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
@@ -62,9 +62,9 @@
<div class="flex items-center justify-between"> <div class="flex items-center justify-between">
<div class="flex items-center gap-4"> <div class="flex items-center gap-4">
<div class="flex items-center gap-3"> <div class="flex items-center gap-3">
<img src="/static/icon.png" alt="StrikePackageGPT" class="h-10 w-10" onerror="this.style.display='none'"> <img src="/static/icon.png" alt="GooseStrike" class="h-10 w-10" onerror="this.style.display='none'">
<div> <div>
<h1 class="text-2xl font-bold text-sp-red">StrikePackageGPT</h1> <h1 class="text-2xl font-bold text-sp-red">GooseStrike</h1>
<span class="text-xs text-sp-white-muted">AI-Powered Penetration Testing Platform</span> <span class="text-xs text-sp-white-muted">AI-Powered Penetration Testing Platform</span>
</div> </div>
</div> </div>
@@ -623,10 +623,8 @@
this.messages.push({ this.messages.push({
role: 'assistant', role: 'assistant',
phase: 'Reconnaissance', phase: 'Reconnaissance',
content: `# Welcome to StrikePackageGPT! 🍁 content: `# Welcome to GooseStrike! 🍁I'm your AI-powered penetration testing assistant, following the **6-Phase Enterprise Methodology**:
I'm your AI-powered penetration testing assistant, following the **6-Phase Enterprise Methodology**:
| Phase | Purpose | | Phase | Purpose |
|-------|---------| |-------|---------|

View File

@@ -1,6 +1,7 @@
""" """
LLM Router Service LLM Router Service
Routes requests to different LLM providers (OpenAI, Anthropic, Ollama) Routes requests to different LLM providers (OpenAI, Anthropic, Ollama)
Supports multiple Ollama endpoints with load balancing
""" """
from fastapi import FastAPI, HTTPException from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
@@ -8,11 +9,15 @@ from pydantic import BaseModel
from typing import Optional, Literal from typing import Optional, Literal
import httpx import httpx
import os import os
import random
import asyncio
from dataclasses import dataclass
from datetime import datetime, timedelta
app = FastAPI( app = FastAPI(
title="LLM Router", title="LLM Router",
description="Routes requests to multiple LLM providers", description="Routes requests to multiple LLM providers with load balancing",
version="0.1.0" version="0.2.0"
) )
app.add_middleware( app.add_middleware(
@@ -26,7 +31,22 @@ app.add_middleware(
# Configuration from environment # Configuration from environment
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "") ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://192.168.1.50:11434") # Support multiple Ollama endpoints (comma-separated)
OLLAMA_ENDPOINTS_STR = os.getenv("OLLAMA_ENDPOINTS", os.getenv("OLLAMA_BASE_URL", "http://192.168.1.50:11434"))
OLLAMA_ENDPOINTS = [url.strip() for url in OLLAMA_ENDPOINTS_STR.split(",") if url.strip()]
LOAD_BALANCE_STRATEGY = os.getenv("LOAD_BALANCE_STRATEGY", "round-robin") # round-robin, random, failover
@dataclass
class EndpointHealth:
url: str
healthy: bool = True
last_check: datetime = None
failure_count: int = 0
models: list = None
# Track endpoint health
endpoint_health: dict[str, EndpointHealth] = {url: EndpointHealth(url=url, models=[]) for url in OLLAMA_ENDPOINTS}
current_endpoint_index = 0
class ChatMessage(BaseModel): class ChatMessage(BaseModel):
@@ -52,33 +72,119 @@ class ChatResponse(BaseModel):
@app.get("/health") @app.get("/health")
async def health_check(): async def health_check():
"""Health check endpoint""" """Health check endpoint"""
return {"status": "healthy", "service": "llm-router"} return {"status": "healthy", "service": "llm-router", "endpoints": len(OLLAMA_ENDPOINTS)}
async def check_endpoint_health(url: str) -> tuple[bool, list]:
"""Check if an Ollama endpoint is healthy and get its models"""
try:
async with httpx.AsyncClient() as client:
response = await client.get(f"{url}/api/tags", timeout=5.0)
if response.status_code == 200:
data = response.json()
models = [m["name"] for m in data.get("models", [])]
return True, models
except Exception:
pass
return False, []
async def get_healthy_endpoint() -> Optional[str]:
"""Get a healthy Ollama endpoint based on load balancing strategy"""
global current_endpoint_index
# Refresh health status for stale checks (older than 30 seconds)
now = datetime.now()
for url, health in endpoint_health.items():
if health.last_check is None or (now - health.last_check) > timedelta(seconds=30):
is_healthy, models = await check_endpoint_health(url)
health.healthy = is_healthy
health.models = models
health.last_check = now
if is_healthy:
health.failure_count = 0
healthy_endpoints = [url for url, h in endpoint_health.items() if h.healthy]
if not healthy_endpoints:
return None
if LOAD_BALANCE_STRATEGY == "random":
return random.choice(healthy_endpoints)
elif LOAD_BALANCE_STRATEGY == "failover":
# Always use first available healthy endpoint
return healthy_endpoints[0]
else: # round-robin (default)
# Find next healthy endpoint in rotation
for _ in range(len(OLLAMA_ENDPOINTS)):
current_endpoint_index = (current_endpoint_index + 1) % len(OLLAMA_ENDPOINTS)
url = OLLAMA_ENDPOINTS[current_endpoint_index]
if url in healthy_endpoints:
return url
return healthy_endpoints[0]
@app.get("/providers") @app.get("/providers")
async def list_providers(): async def list_providers():
"""List available LLM providers and their status""" """List available LLM providers and their status"""
# Dynamically fetch Ollama models # Check all Ollama endpoints
ollama_models = [] ollama_info = []
ollama_available = False all_models = set()
try: any_available = False
async with httpx.AsyncClient() as client:
response = await client.get(f"{OLLAMA_BASE_URL}/api/tags", timeout=5.0) for url in OLLAMA_ENDPOINTS:
if response.status_code == 200: is_healthy, models = await check_endpoint_health(url)
data = response.json() endpoint_health[url].healthy = is_healthy
ollama_models = [m["name"] for m in data.get("models", [])] endpoint_health[url].models = models
ollama_available = True endpoint_health[url].last_check = datetime.now()
except Exception:
ollama_models = ["llama3", "mistral", "codellama"] # fallback ollama_info.append({
"url": url,
"available": is_healthy,
"models": models
})
if is_healthy:
any_available = True
all_models.update(models)
providers = { providers = {
"openai": {"available": bool(OPENAI_API_KEY), "models": ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo"]}, "openai": {"available": bool(OPENAI_API_KEY), "models": ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo"]},
"anthropic": {"available": bool(ANTHROPIC_API_KEY), "models": ["claude-sonnet-4-20250514", "claude-3-5-haiku-20241022"]}, "anthropic": {"available": bool(ANTHROPIC_API_KEY), "models": ["claude-sonnet-4-20250514", "claude-3-5-haiku-20241022"]},
"ollama": {"available": ollama_available, "base_url": OLLAMA_BASE_URL, "models": ollama_models} "ollama": {
"available": any_available,
"endpoints": ollama_info,
"load_balance_strategy": LOAD_BALANCE_STRATEGY,
"models": list(all_models) if all_models else ["llama3", "mistral", "codellama"]
}
} }
return providers return providers
@app.get("/endpoints")
async def list_endpoints():
"""List all Ollama endpoints with detailed status"""
results = []
for url in OLLAMA_ENDPOINTS:
is_healthy, models = await check_endpoint_health(url)
endpoint_health[url].healthy = is_healthy
endpoint_health[url].models = models
endpoint_health[url].last_check = datetime.now()
results.append({
"url": url,
"healthy": is_healthy,
"models": models,
"failure_count": endpoint_health[url].failure_count
})
return {
"strategy": LOAD_BALANCE_STRATEGY,
"endpoints": results,
"healthy_count": sum(1 for r in results if r["healthy"]),
"total_count": len(results)
}
@app.post("/chat", response_model=ChatResponse) @app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest): async def chat(request: ChatRequest):
"""Route chat request to specified LLM provider""" """Route chat request to specified LLM provider"""
@@ -174,11 +280,16 @@ async def _call_anthropic(request: ChatRequest) -> ChatResponse:
async def _call_ollama(request: ChatRequest) -> ChatResponse: async def _call_ollama(request: ChatRequest) -> ChatResponse:
"""Call Ollama API (local models)""" """Call Ollama API with load balancing across endpoints"""
endpoint = await get_healthy_endpoint()
if not endpoint:
raise HTTPException(status_code=503, detail="No healthy Ollama endpoints available")
async with httpx.AsyncClient() as client: async with httpx.AsyncClient() as client:
try: try:
response = await client.post( response = await client.post(
f"{OLLAMA_BASE_URL}/api/chat", f"{endpoint}/api/chat",
json={ json={
"model": request.model, "model": request.model,
"messages": [m.model_dump() for m in request.messages], "messages": [m.model_dump() for m in request.messages],
@@ -192,8 +303,15 @@ async def _call_ollama(request: ChatRequest) -> ChatResponse:
) )
if response.status_code != 200: if response.status_code != 200:
# Mark endpoint as failed
endpoint_health[endpoint].failure_count += 1
if endpoint_health[endpoint].failure_count >= 3:
endpoint_health[endpoint].healthy = False
raise HTTPException(status_code=response.status_code, detail=response.text) raise HTTPException(status_code=response.status_code, detail=response.text)
# Reset failure count on success
endpoint_health[endpoint].failure_count = 0
data = response.json() data = response.json()
return ChatResponse( return ChatResponse(
provider="ollama", provider="ollama",
@@ -201,11 +319,22 @@ async def _call_ollama(request: ChatRequest) -> ChatResponse:
content=data["message"]["content"], content=data["message"]["content"],
usage={ usage={
"prompt_tokens": data.get("prompt_eval_count", 0), "prompt_tokens": data.get("prompt_eval_count", 0),
"completion_tokens": data.get("eval_count", 0) "completion_tokens": data.get("eval_count", 0),
"endpoint": endpoint
} }
) )
except httpx.ConnectError: except httpx.ConnectError:
raise HTTPException(status_code=503, detail="Ollama service not available") # Mark endpoint as unhealthy
endpoint_health[endpoint].healthy = False
endpoint_health[endpoint].failure_count += 1
# Try another endpoint if available
other_endpoint = await get_healthy_endpoint()
if other_endpoint and other_endpoint != endpoint:
# Recursive call will use different endpoint
return await _call_ollama(request)
raise HTTPException(status_code=503, detail="All Ollama endpoints unavailable")
if __name__ == "__main__": if __name__ == "__main__":