mirror of
https://github.com/mblanke/GooseStrike.git
synced 2026-03-01 14:00:21 -05:00
Add integration test endpoints for n8n and Ollama
This commit is contained in:
38
app/agents/base_agent.py
Normal file
38
app/agents/base_agent.py
Normal file
@@ -0,0 +1,38 @@
|
||||
"""Base LLM agent scaffolding for GooseStrike."""
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict
|
||||
|
||||
from .llm_router import LLMProviderError, call_llm_with_fallback
|
||||
|
||||
|
||||
def llm_call(prompt: str) -> str:
    """Route *prompt* through the configured LLM providers.

    Delegates to the router's fallback chain; when every provider fails,
    the error is folded into a plain-text note instead of propagating, so
    agents always receive a string response.
    """
    try:
        reply = call_llm_with_fallback(prompt)
    except LLMProviderError as exc:
        # Degrade gracefully: agents report the outage rather than crash.
        return f"LLM providers unavailable: {exc}"
    return reply
|
||||
|
||||
|
||||
@dataclass
class AgentResult:
    """Bundle of artifacts produced by one agent run."""

    # The exact prompt text sent to the LLM.
    prompt: str
    # The provider's unprocessed reply.
    raw_response: str
    # Structured output extracted from the reply by the agent's parser.
    recommendations: Dict[str, Any]
|
||||
|
||||
|
||||
class BaseAgent:
    """Template for prompt-driven agents.

    The pipeline is: render a prompt from context, send it to the LLM,
    and parse the raw reply into a recommendations dict.  Subclasses
    override ``build_prompt`` (required) and ``parse`` (optional).
    """

    # Short identifier distinguishing concrete agents.
    name = "base"

    def run(self, context: Dict[str, Any]) -> AgentResult:
        """Execute the full pipeline for *context* and bundle the artifacts."""
        rendered = self.build_prompt(context)
        reply = llm_call(rendered)
        parsed = self.parse(reply)
        return AgentResult(
            prompt=rendered,
            raw_response=reply,
            recommendations=parsed,
        )

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Render the provider prompt; concrete agents must override this."""
        raise NotImplementedError

    def parse(self, raw: str) -> Dict[str, Any]:
        """Default parsing: the trimmed reply under a generic key."""
        return {"notes": raw.strip()}
|
||||
28
app/agents/cve_agent.py
Normal file
28
app/agents/cve_agent.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""CVE triage agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from .base_agent import AgentResult, BaseAgent
|
||||
|
||||
|
||||
class CVEAgent(BaseAgent):
    """Agent that turns raw CVE records into a triage prompt and action list."""

    name = "cve"

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Render one summary line per CVE between the framing instructions."""
        parts = ["You are prioritizing CVEs for a legal assessment."]
        for entry in context.get("cves", []):
            parts.append(
                f"{entry.get('cve_id')}: severity={entry.get('severity')} score={entry.get('score')} desc={entry.get('description','')[:120]}"
            )
        parts.append("Provide prioritized actions and validation steps. No exploit code.")
        return "\n".join(parts)

    def parse(self, raw: str) -> Dict[str, Any]:
        """Collect each non-empty reply line as an individual action item."""
        actions = []
        for line in raw.split('\n'):
            cleaned = line.strip()
            if cleaned:
                actions.append(cleaned)
        return {"cve_actions": actions}
|
||||
|
||||
|
||||
def run(context: Dict[str, Any]) -> AgentResult:
    """Module-level convenience entry point for the CVE agent."""
    agent = CVEAgent()
    return agent.run(context)
|
||||
28
app/agents/exploit_agent.py
Normal file
28
app/agents/exploit_agent.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""Exploit correlation agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from .base_agent import AgentResult, BaseAgent
|
||||
|
||||
|
||||
class ExploitAgent(BaseAgent):
    """Agent that correlates known public exploits with observed CVEs."""

    name = "exploit"

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Render one line per exploit record between the framing instructions."""
        parts = ["Summarize how existing public exploits might apply."]
        for record in context.get("exploits", []):
            parts.append(
                f"{record.get('source')} -> {record.get('title')} references {record.get('cve_id')}"
            )
        parts.append("Provide validation ideas and defensive considerations only.")
        return "\n".join(parts)

    def parse(self, raw: str) -> Dict[str, Any]:
        """Collect each non-empty reply line as an individual note."""
        collected = []
        for line in raw.split('\n'):
            cleaned = line.strip()
            if cleaned:
                collected.append(cleaned)
        return {"exploit_notes": collected}
|
||||
|
||||
|
||||
def run(context: Dict[str, Any]) -> AgentResult:
    """Module-level convenience entry point for the exploit agent."""
    agent = ExploitAgent()
    return agent.run(context)
|
||||
78
app/agents/llm_router.py
Normal file
78
app/agents/llm_router.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""LLM routing helpers with Claude -> HackGPT fallback."""
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
class LLMProviderError(RuntimeError):
    """Signals that a downstream LLM provider call failed."""
|
||||
|
||||
|
||||
def _call_provider(name: str, url: str, prompt: str) -> str:
    """POST *prompt* to a generic HTTP LLM provider and extract a text reply.

    A bearer token is taken from the ``<NAME>_API_KEY`` environment variable
    when present.  JSON replies are probed for common answer keys; anything
    else falls back to the raw response body.
    """
    headers = {"Content-Type": "application/json"}
    token = os.getenv(f"{name.upper()}_API_KEY")
    if token:
        headers["Authorization"] = f"Bearer {token}"

    response = requests.post(url, json={"prompt": prompt}, headers=headers, timeout=30)
    response.raise_for_status()

    body: Dict[str, str] = {}
    if response.headers.get("content-type", "").startswith("application/json"):
        body = response.json()
    return body.get("response") or body.get("answer") or body.get("text") or response.text
|
||||
|
||||
|
||||
def normalize_ollama_url(base_url: str) -> str:
    """Return the Ollama ``/api/generate`` endpoint for the supplied base URL.

    Accepts a bare host (``http://host:11434``), an API root (``.../api``),
    or an already-complete generate endpoint (``.../api/generate``), with or
    without a trailing slash, and always yields the full generate endpoint.
    """
    base_url = base_url.rstrip("/")
    # Check URL suffixes rather than `"/api" in base_url`: a substring test
    # misfires on hostnames that merely contain "api" (e.g.
    # http://apiserver:11434 would have lost its /api path segment).
    if base_url.endswith("/api/generate"):
        return base_url
    if base_url.endswith("/api"):
        return f"{base_url}/generate"
    return f"{base_url}/api/generate"
|
||||
|
||||
|
||||
def _call_ollama(base_url: str, prompt: str) -> str:
    """Invoke a local Ollama instance using the configured model.

    The model name comes from the ``OLLAMA_MODEL`` environment variable
    (default ``llama3``).
    """
    endpoint = normalize_ollama_url(base_url)
    request_body = {
        "model": os.getenv("OLLAMA_MODEL", "llama3"),
        "prompt": prompt,
        # Request a single complete reply instead of a token stream.
        "stream": False,
    }
    response = requests.post(endpoint, json=request_body, timeout=30)
    response.raise_for_status()

    parsed: Dict[str, str] = {}
    if response.headers.get("content-type", "").startswith("application/json"):
        parsed = response.json()
    return parsed.get("response") or parsed.get("output") or response.text
|
||||
|
||||
|
||||
def call_llm_with_fallback(prompt: str) -> str:
    """Send *prompt* to the first working provider: Claude, then HackGPT, then Ollama.

    Providers are enabled via the CLAUDE_API_URL, HACKGPT_API_URL, and
    OLLAMA_API_URL / OLLAMA_BASE_URL environment variables and tried in that
    order.  The first successful reply wins.

    Raises:
        LLMProviderError: at least one provider is configured but all of
            them failed; the message aggregates every per-provider error.

    Returns:
        The provider's reply, or a static placeholder string when no
        provider is configured at all.
    """
    candidates: List[Tuple[str, str]] = []
    claude_url = os.getenv("CLAUDE_API_URL")
    if claude_url:
        candidates.append(("claude", claude_url))
    hackgpt_url = os.getenv("HACKGPT_API_URL")
    if hackgpt_url:
        candidates.append(("hackgpt", hackgpt_url))
    ollama_base = os.getenv("OLLAMA_API_URL") or os.getenv("OLLAMA_BASE_URL")
    if ollama_base:
        candidates.append(("ollama", ollama_base))

    errors: List[str] = []
    for name, url in candidates:
        try:
            # Ollama speaks its own API shape; everything else uses the
            # generic JSON prompt/response convention.
            if name == "ollama":
                return _call_ollama(url, prompt)
            return _call_provider(name, url, prompt)
        except Exception as exc:  # pragma: no cover - network dependent
            errors.append(f"{name} failed: {exc}")

    if errors:
        raise LLMProviderError("; ".join(errors))

    return "LLM response placeholder. Configure CLAUDE_API_URL or HACKGPT_API_URL to enable live replies."
|
||||
31
app/agents/plan_agent.py
Normal file
31
app/agents/plan_agent.py
Normal file
@@ -0,0 +1,31 @@
|
||||
"""High level planning agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from .base_agent import AgentResult, BaseAgent
|
||||
|
||||
|
||||
class PlanAgent(BaseAgent):
    """Agent that asks the LLM for a prioritized assessment plan."""

    name = "plan"

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Render objectives and intel (when present) into a planning prompt."""
        parts = ["Create a prioritized plan for the GooseStrike assessment."]
        objectives = context.get("objectives", [])
        if objectives:
            parts.append("Objectives:")
            for objective in objectives:
                parts.append(f"- {objective}")
        intel = context.get("intel", [])
        if intel:
            parts.append("Intel:")
            for item in intel:
                parts.append(f"- {item}")
        parts.append("Return a numbered plan with legal, defensive-minded suggestions.")
        return "\n".join(parts)

    def parse(self, raw: str) -> Dict[str, Any]:
        """Collect each non-empty reply line as a plan step."""
        plan_steps = []
        for line in raw.split('\n'):
            cleaned = line.strip()
            if cleaned:
                plan_steps.append(cleaned)
        return {"plan": plan_steps}
|
||||
|
||||
|
||||
def run(context: Dict[str, Any]) -> AgentResult:
    """Module-level convenience entry point for the planning agent."""
    agent = PlanAgent()
    return agent.run(context)
|
||||
35
app/agents/prioritizer_agent.py
Normal file
35
app/agents/prioritizer_agent.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""Target prioritizer AI agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from .base_agent import AgentResult, BaseAgent
|
||||
|
||||
|
||||
class PrioritizerAgent(BaseAgent):
    """Agent that ranks assessed hosts and asks for ATT&CK-aligned next actions."""

    name = "prioritizer"

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Summarize each asset's exposure and request a ranked JSON plan.

        Fix: the per-host "max severity" was previously computed with a plain
        string ``max()``, which compares lexicographically ("medium" beats
        "critical" and "high").  Severities are now ranked explicitly;
        unknown labels rank lowest but are still reported verbatim if they
        are the only ones present.
        """
        severity_rank = {"info": 0, "low": 1, "medium": 2, "high": 3, "critical": 4}
        hosts: List[Dict[str, Any]] = context.get("assets", [])
        findings: List[str] = []
        for asset in hosts:
            ip = asset.get("ip")
            services = asset.get("services", [])
            severities = [
                vuln.get("severity", "")
                for svc in services
                for vuln in svc.get("vulnerabilities", [])
            ]
            severity = max(
                severities,
                key=lambda label: severity_rank.get(str(label).lower(), -1),
                default="",
            )
            findings.append(
                f"Host {ip} exposes {len(services)} services (max severity: {severity})."
            )
        prompt_lines = [
            "You are GooseStrike's targeting aide.",
            "Rank the following hosts for next actions using MITRE ATT&CK tactics.",
        ]
        # With no assets, steer the model toward intel gathering instead.
        prompt_lines.extend(findings or ["No assets supplied; recommend intel-gathering tasks."])
        prompt_lines.append("Return JSON with priorities, rationale, and suggested tactic per host.")
        return "\n".join(prompt_lines)

    def parse(self, raw: str) -> Dict[str, Any]:
        """Pass the model's ranking through untouched (whitespace-trimmed)."""
        return {"priorities": raw.strip()}
|
||||
|
||||
|
||||
def prioritize_targets(context: Dict[str, Any]) -> AgentResult:
    """Module-level convenience entry point for the prioritizer agent."""
    agent = PrioritizerAgent()
    return agent.run(context)
|
||||
29
app/agents/privesc_agent.py
Normal file
29
app/agents/privesc_agent.py
Normal file
@@ -0,0 +1,29 @@
|
||||
"""Privilege escalation agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from .base_agent import AgentResult, BaseAgent
|
||||
|
||||
|
||||
class PrivEscAgent(BaseAgent):
    """Agent that suggests lawful privilege-escalation checks for a lab host."""

    name = "privesc"

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Render the target host and any findings into a checklist request."""
        parts = ["Suggest legal privilege escalation checks for a lab machine."]
        target = context.get("host")
        if target:
            parts.append(f"Host: {target}")
        for finding in context.get("findings", []):
            parts.append(f"Finding: {finding}")
        parts.append("Provide checklists only; no exploit payloads.")
        return "\n".join(parts)

    def parse(self, raw: str) -> Dict[str, Any]:
        """Collect each non-empty reply line as a checklist entry."""
        checks = []
        for line in raw.split('\n'):
            cleaned = line.strip()
            if cleaned:
                checks.append(cleaned)
        return {"privesc_checks": checks}
|
||||
|
||||
|
||||
def run(context: Dict[str, Any]) -> AgentResult:
    """Module-level convenience entry point for the privilege-escalation agent."""
    agent = PrivEscAgent()
    return agent.run(context)
|
||||
31
app/agents/recon_agent.py
Normal file
31
app/agents/recon_agent.py
Normal file
@@ -0,0 +1,31 @@
|
||||
"""Reconnaissance agent."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from .base_agent import AgentResult, BaseAgent
|
||||
|
||||
|
||||
class ReconAgent(BaseAgent):
    """Agent that proposes safe reconnaissance follow-ups for observed hosts."""

    name = "recon"

    def build_prompt(self, context: Dict[str, Any]) -> str:
        """Render one service-summary line per host between the framing lines."""
        parts = ["You are advising a legal CTF recon team."]
        for host in context.get("hosts", []):
            summary = ", ".join(
                f"{svc.get('proto')}/{svc.get('port')} {svc.get('product','?')} {svc.get('version','')}"
                for svc in host.get("services", [])
            )
            parts.append(f"Host {host.get('ip')} services: {summary}")
        parts.append("Suggest safe recon next steps without exploit code.")
        return "\n".join(parts)

    def parse(self, raw: str) -> Dict[str, Any]:
        """Collect non-empty reply lines, shedding bullet dashes and spaces."""
        steps = []
        for line in raw.split('\n'):
            if line.strip():
                # strip('- ') removes any mix of '-' and ' ' from both ends.
                steps.append(line.strip('- '))
        return {"recon_steps": steps}
|
||||
|
||||
|
||||
def run(context: Dict[str, Any]) -> AgentResult:
    """Module-level convenience entry point for the recon agent."""
    agent = ReconAgent()
    return agent.run(context)
|
||||
Reference in New Issue
Block a user