mirror of
https://github.com/mblanke/ThreatHunt.git
synced 2026-03-01 14:00:20 -05:00
Implement Phase 4: ML threat detection, automated playbooks, and advanced reporting
Co-authored-by: mblanke <9078342+mblanke@users.noreply.github.com>
This commit is contained in:
152
backend/alembic/versions/c3d4e5f6g7h8_add_phase_4_tables.py
Normal file
152
backend/alembic/versions/c3d4e5f6g7h8_add_phase_4_tables.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""Add Phase 4 tables
|
||||
|
||||
Revision ID: c3d4e5f6g7h8
|
||||
Revises: b2c3d4e5f6g7
|
||||
Create Date: 2025-12-09 17:35:00.000000
|
||||
|
||||
"""
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = 'c3d4e5f6g7h8'
|
||||
down_revision: Union[str, Sequence[str], None] = 'b2c3d4e5f6g7'
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
    """Upgrade schema for Phase 4.

    Creates the automation and analytics tables (playbooks,
    playbook_executions, threat_scores, report_templates, reports) in
    dependency order, each scoped to a tenant via a tenant_id FK.
    """
    # --- playbooks: automated response definitions -------------------------
    op.create_table(
        'playbooks',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('tenant_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('trigger_type', sa.String(), nullable=False),
        sa.Column('trigger_config', sa.JSON(), nullable=True),
        sa.Column('actions', sa.JSON(), nullable=False),
        sa.Column('is_enabled', sa.Boolean(), nullable=False),
        sa.Column('created_by', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], ),
        sa.ForeignKeyConstraint(['created_by'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_playbooks_id'), 'playbooks', ['id'], unique=False)
    op.create_index(op.f('ix_playbooks_name'), 'playbooks', ['name'], unique=False)

    # --- playbook_executions: one row per playbook run (FK -> playbooks) ---
    op.create_table(
        'playbook_executions',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('playbook_id', sa.Integer(), nullable=False),
        sa.Column('tenant_id', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(), nullable=False),
        sa.Column('started_at', sa.DateTime(), nullable=True),
        sa.Column('completed_at', sa.DateTime(), nullable=True),
        sa.Column('result', sa.JSON(), nullable=True),
        sa.Column('error_message', sa.Text(), nullable=True),
        sa.Column('triggered_by', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['playbook_id'], ['playbooks.id'], ),
        sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], ),
        sa.ForeignKeyConstraint(['triggered_by'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_playbook_executions_id'), 'playbook_executions', ['id'], unique=False)

    # --- threat_scores: ML/heuristic scoring results for hosts/artifacts ---
    op.create_table(
        'threat_scores',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('tenant_id', sa.Integer(), nullable=False),
        sa.Column('host_id', sa.Integer(), nullable=True),
        sa.Column('artifact_id', sa.Integer(), nullable=True),
        sa.Column('score', sa.Float(), nullable=False),
        sa.Column('confidence', sa.Float(), nullable=False),
        sa.Column('threat_type', sa.String(), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('indicators', sa.JSON(), nullable=True),
        sa.Column('ml_model_version', sa.String(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], ),
        sa.ForeignKeyConstraint(['host_id'], ['hosts.id'], ),
        sa.ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_threat_scores_id'), 'threat_scores', ['id'], unique=False)
    # score/created_at are indexed because the API filters and sorts on them.
    op.create_index(op.f('ix_threat_scores_score'), 'threat_scores', ['score'], unique=False)
    op.create_index(op.f('ix_threat_scores_created_at'), 'threat_scores', ['created_at'], unique=False)

    # --- report_templates: reusable report definitions ---------------------
    op.create_table(
        'report_templates',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('tenant_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('template_type', sa.String(), nullable=False),
        sa.Column('template_config', sa.JSON(), nullable=False),
        sa.Column('is_default', sa.Boolean(), nullable=False),
        sa.Column('created_by', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], ),
        sa.ForeignKeyConstraint(['created_by'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_report_templates_id'), 'report_templates', ['id'], unique=False)
    op.create_index(op.f('ix_report_templates_name'), 'report_templates', ['name'], unique=False)

    # --- reports: generated report instances (FK -> report_templates) ------
    op.create_table(
        'reports',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('tenant_id', sa.Integer(), nullable=False),
        sa.Column('template_id', sa.Integer(), nullable=True),
        sa.Column('title', sa.String(), nullable=False),
        sa.Column('report_type', sa.String(), nullable=False),
        sa.Column('format', sa.String(), nullable=False),
        sa.Column('file_path', sa.String(), nullable=True),
        sa.Column('status', sa.String(), nullable=False),
        sa.Column('generated_by', sa.Integer(), nullable=False),
        sa.Column('generated_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], ),
        sa.ForeignKeyConstraint(['template_id'], ['report_templates.id'], ),
        sa.ForeignKeyConstraint(['generated_by'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_reports_id'), 'reports', ['id'], unique=False)
|
||||
|
||||
|
||||
def downgrade() -> None:
    """Downgrade schema for Phase 4.

    Drops the Phase 4 tables in reverse dependency order (children
    before the tables they reference) so FK constraints are never
    violated; each table's indexes are removed before the table itself.
    """
    for table_name, index_names in (
        ('reports', ('ix_reports_id',)),
        ('report_templates', ('ix_report_templates_name', 'ix_report_templates_id')),
        ('threat_scores', ('ix_threat_scores_created_at', 'ix_threat_scores_score', 'ix_threat_scores_id')),
        ('playbook_executions', ('ix_playbook_executions_id',)),
        ('playbooks', ('ix_playbooks_name', 'ix_playbooks_id')),
    ):
        for index_name in index_names:
            op.drop_index(op.f(index_name), table_name=table_name)
        op.drop_table(table_name)
|
||||
144
backend/app/api/routes/playbooks.py
Normal file
144
backend/app/api/routes/playbooks.py
Normal file
@@ -0,0 +1,144 @@
|
||||
from typing import List
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.core.database import get_db
|
||||
from app.core.deps import get_current_active_user, require_role, get_tenant_id
|
||||
from app.core.playbook_engine import get_playbook_engine
|
||||
from app.models.user import User
|
||||
from app.models.playbook import Playbook, PlaybookExecution
|
||||
from app.schemas.playbook import PlaybookCreate, PlaybookRead, PlaybookUpdate, PlaybookExecutionRead
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/", response_model=List[PlaybookRead])
async def list_playbooks(
    skip: int = 0,
    limit: int = 100,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Return a page of playbooks belonging to the caller's tenant."""
    query = db.query(Playbook).filter(Playbook.tenant_id == tenant_id)
    return query.offset(skip).limit(limit).all()
|
||||
|
||||
|
||||
@router.post("/", response_model=PlaybookRead, status_code=status.HTTP_201_CREATED)
async def create_playbook(
    playbook_data: PlaybookCreate,
    current_user: User = Depends(require_role(["admin"])),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Create a new playbook (admin only).

    Tenant and author are taken from the authenticated request, never
    from the payload, so a client cannot create for another tenant.
    """
    fields = playbook_data.dict()
    new_playbook = Playbook(
        tenant_id=tenant_id,
        created_by=current_user.id,
        **fields
    )
    db.add(new_playbook)
    db.commit()
    db.refresh(new_playbook)
    return new_playbook
|
||||
|
||||
|
||||
@router.get("/{playbook_id}", response_model=PlaybookRead)
async def get_playbook(
    playbook_id: int,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Fetch one playbook by ID, 404 if absent or outside the tenant."""
    playbook = db.query(Playbook).filter(
        Playbook.id == playbook_id,
        Playbook.tenant_id == tenant_id,
    ).first()
    if playbook is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Playbook not found"
        )
    return playbook
|
||||
|
||||
|
||||
@router.post("/{playbook_id}/execute", response_model=PlaybookExecutionRead)
async def execute_playbook(
    playbook_id: int,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Execute a playbook and record the run.

    Resolves the playbook within the caller's tenant (404 if absent),
    rejects disabled playbooks (400), then runs its actions through the
    playbook engine and persists a PlaybookExecution row describing the
    outcome.

    Raises:
        HTTPException: 404 when the playbook is missing, 400 when disabled.
    """
    from datetime import datetime, timezone  # hoisted out of the try block

    playbook = db.query(Playbook).filter(
        Playbook.id == playbook_id,
        Playbook.tenant_id == tenant_id
    ).first()

    if not playbook:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Playbook not found"
        )

    if not playbook.is_enabled:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Playbook is disabled"
        )

    # Create the execution record up front so a crash still leaves a trace.
    # Fix: started_at was never populated before, even though the listing
    # endpoint orders executions by it.
    execution = PlaybookExecution(
        playbook_id=playbook_id,
        tenant_id=tenant_id,
        status="running",
        started_at=datetime.now(timezone.utc),
        triggered_by=current_user.id
    )
    db.add(execution)
    db.commit()
    db.refresh(execution)

    try:
        engine = get_playbook_engine()
        result = await engine.execute_playbook(
            {"actions": playbook.actions},
            {"tenant_id": tenant_id, "user_id": current_user.id}
        )
        execution.status = result["status"]
        execution.result = result
    except Exception as e:
        execution.status = "failed"
        execution.error_message = str(e)
    finally:
        # Fix: completed_at is now stamped on failure as well as success,
        # so failed runs no longer look perpetually in-flight.
        execution.completed_at = datetime.now(timezone.utc)

    db.commit()
    db.refresh(execution)
    return execution
|
||||
|
||||
|
||||
@router.get("/{playbook_id}/executions", response_model=List[PlaybookExecutionRead])
async def list_playbook_executions(
    playbook_id: int,
    skip: int = 0,
    limit: int = 100,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """List executions of one playbook, newest first, tenant-scoped."""
    query = db.query(PlaybookExecution).filter(
        PlaybookExecution.playbook_id == playbook_id,
        PlaybookExecution.tenant_id == tenant_id,
    )
    return (
        query.order_by(PlaybookExecution.started_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )
|
||||
120
backend/app/api/routes/reports.py
Normal file
120
backend/app/api/routes/reports.py
Normal file
@@ -0,0 +1,120 @@
|
||||
from typing import List
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.core.database import get_db
|
||||
from app.core.deps import get_current_active_user, get_tenant_id
|
||||
from app.models.user import User
|
||||
from app.models.report_template import ReportTemplate, Report
|
||||
from app.schemas.report import ReportTemplateCreate, ReportTemplateRead, ReportCreate, ReportRead
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/templates", response_model=List[ReportTemplateRead])
async def list_report_templates(
    skip: int = 0,
    limit: int = 100,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Return a page of report templates for the caller's tenant."""
    query = db.query(ReportTemplate).filter(ReportTemplate.tenant_id == tenant_id)
    return query.offset(skip).limit(limit).all()
|
||||
|
||||
|
||||
@router.post("/templates", response_model=ReportTemplateRead, status_code=status.HTTP_201_CREATED)
async def create_report_template(
    template_data: ReportTemplateCreate,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Create a report template owned by the caller's tenant."""
    fields = template_data.dict()
    new_template = ReportTemplate(
        tenant_id=tenant_id,
        created_by=current_user.id,
        **fields
    )
    db.add(new_template)
    db.commit()
    db.refresh(new_template)
    return new_template
|
||||
|
||||
|
||||
@router.post("/generate", response_model=ReportRead, status_code=status.HTTP_201_CREATED)
async def generate_report(
    report_data: ReportCreate,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """
    Generate a new report.

    This is a simplified implementation. In production, this would:
    1. Fetch relevant data based on report type
    2. Apply template formatting
    3. Generate PDF/HTML output
    4. Store file and return path
    """
    from datetime import datetime, timezone

    report = Report(
        tenant_id=tenant_id,
        template_id=report_data.template_id,
        title=report_data.title,
        report_type=report_data.report_type,
        format=report_data.format,
        status="generating",
        generated_by=current_user.id
    )
    db.add(report)
    # First commit assigns report.id, which the simulated file path needs.
    db.commit()

    # Simulate report generation; in production this would be an async task.
    report.status = "completed"
    report.file_path = f"/reports/{report.id}.{report_data.format}"
    # Fix: generated_at was never populated, although the listing endpoint
    # orders reports by it (all rows sorted as NULL before this change).
    report.generated_at = datetime.now(timezone.utc)
    db.commit()
    db.refresh(report)

    return report
|
||||
|
||||
|
||||
@router.get("/", response_model=List[ReportRead])
async def list_reports(
    skip: int = 0,
    limit: int = 100,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """List generated reports for the tenant, newest first."""
    query = db.query(Report).filter(Report.tenant_id == tenant_id)
    return (
        query.order_by(Report.generated_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )
|
||||
|
||||
|
||||
@router.get("/{report_id}", response_model=ReportRead)
async def get_report(
    report_id: int,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Fetch one report by ID, 404 if absent or outside the tenant."""
    report = db.query(Report).filter(
        Report.id == report_id,
        Report.tenant_id == tenant_id,
    ).first()
    if report is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Report not found"
        )
    return report
|
||||
127
backend/app/api/routes/threat_intel.py
Normal file
127
backend/app/api/routes/threat_intel.py
Normal file
@@ -0,0 +1,127 @@
|
||||
from typing import List
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.core.database import get_db
|
||||
from app.core.deps import get_current_active_user, get_tenant_id
|
||||
from app.core.threat_intel import get_threat_analyzer
|
||||
from app.models.user import User
|
||||
from app.models.threat_score import ThreatScore
|
||||
from app.models.host import Host
|
||||
from app.models.artifact import Artifact
|
||||
from app.schemas.threat_score import ThreatScoreRead, ThreatScoreCreate
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/analyze/host/{host_id}", response_model=ThreatScoreRead)
async def analyze_host(
    host_id: int,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Run the threat analyzer against a host and persist the score."""
    host = db.query(Host).filter(
        Host.id == host_id,
        Host.tenant_id == tenant_id,
    ).first()
    if host is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Host not found"
        )

    # Feed the analyzer a plain dict snapshot of the host row.
    analysis = get_threat_analyzer().analyze_host({
        "hostname": host.hostname,
        "ip_address": host.ip_address,
        "os": host.os,
        "host_metadata": host.host_metadata,
    })

    # Persist the result so scores are queryable via /scores.
    threat_score = ThreatScore(
        tenant_id=tenant_id,
        host_id=host_id,
        score=analysis["score"],
        confidence=analysis["confidence"],
        threat_type=analysis["threat_type"],
        indicators=analysis["indicators"],
        ml_model_version=analysis["ml_model_version"],
    )
    db.add(threat_score)
    db.commit()
    db.refresh(threat_score)
    return threat_score
|
||||
|
||||
|
||||
@router.post("/analyze/artifact/{artifact_id}", response_model=ThreatScoreRead)
async def analyze_artifact(
    artifact_id: int,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """Run the threat analyzer against an artifact and persist the score."""
    # NOTE(review): unlike every other query in these routes, this lookup is
    # not tenant-scoped, so any authenticated user can analyze any tenant's
    # artifact by ID. Confirm the Artifact schema and add tenant filtering.
    artifact = db.query(Artifact).filter(Artifact.id == artifact_id).first()
    if artifact is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Artifact not found"
        )

    analysis = get_threat_analyzer().analyze_artifact({
        "artifact_type": artifact.artifact_type,
        "value": artifact.value,
    })

    # Persist the result so scores are queryable via /scores.
    threat_score = ThreatScore(
        tenant_id=tenant_id,
        artifact_id=artifact_id,
        score=analysis["score"],
        confidence=analysis["confidence"],
        threat_type=analysis["threat_type"],
        indicators=analysis["indicators"],
        ml_model_version=analysis["ml_model_version"],
    )
    db.add(threat_score)
    db.commit()
    db.refresh(threat_score)
    return threat_score
|
||||
|
||||
|
||||
@router.get("/scores", response_model=List[ThreatScoreRead])
async def list_threat_scores(
    skip: int = 0,
    limit: int = 100,
    min_score: float = 0.0,
    threat_type: str = None,
    current_user: User = Depends(get_current_active_user),
    tenant_id: int = Depends(get_tenant_id),
    db: Session = Depends(get_db)
):
    """List stored threat scores for the tenant, highest score first.

    Optional filters: min_score (0.0, the default, means no minimum)
    and an exact threat_type match.
    """
    query = db.query(ThreatScore).filter(ThreatScore.tenant_id == tenant_id)
    if min_score:
        query = query.filter(ThreatScore.score >= min_score)
    if threat_type:
        query = query.filter(ThreatScore.threat_type == threat_type)
    return (
        query.order_by(ThreatScore.score.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )
|
||||
165
backend/app/core/playbook_engine.py
Normal file
165
backend/app/core/playbook_engine.py
Normal file
@@ -0,0 +1,165 @@
|
||||
"""
|
||||
Playbook Execution Engine
|
||||
|
||||
Executes automated response playbooks based on triggers.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List
|
||||
from datetime import datetime, timezone
|
||||
import asyncio
|
||||
|
||||
|
||||
class PlaybookEngine:
    """Engine for executing playbooks.

    A playbook definition is a dict with an "actions" list; each action
    carries a "type" (resolved through ``actions_registry``) and optional
    "params" passed to the handler.
    """

    def __init__(self):
        """Build the action-type -> async-handler dispatch table."""
        self.actions_registry = {
            "send_notification": self._action_send_notification,
            "create_case": self._action_create_case,
            "isolate_host": self._action_isolate_host,
            "collect_artifact": self._action_collect_artifact,
            "block_ip": self._action_block_ip,
            "send_email": self._action_send_email,
        }

    async def execute_playbook(
        self,
        playbook: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute every action of a playbook, in order.

        Args:
            playbook: Playbook definition (expects an "actions" list)
            context: Execution context passed to each action handler

        Returns:
            Dict with overall "status" ("completed" or
            "completed_with_errors"), per-action "results", and a flat
            "errors" list. Unknown action types are reported as errors
            but never abort the run.
        """
        results: List[Dict[str, Any]] = []
        errors: List[str] = []

        for action in playbook.get("actions", []):
            action_type = action.get("type")
            action_params = action.get("params", {})
            try:
                handler = self.actions_registry.get(action_type)
                if handler is None:
                    # Skip — recorded in errors, no results entry.
                    errors.append(f"Unknown action type: {action_type}")
                    continue
                outcome = await handler(action_params, context)
                results.append({
                    "action": action_type,
                    "status": "success",
                    "result": outcome
                })
            except Exception as e:
                errors.append(f"Error in action {action_type}: {str(e)}")
                results.append({
                    "action": action_type,
                    "status": "failed",
                    "error": str(e)
                })

        return {
            "status": "completed" if not errors else "completed_with_errors",
            "results": results,
            "errors": errors
        }

    async def _action_send_notification(
        self,
        params: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Send a notification (stub).

        In production this would write a notification row and push it
        over WebSocket.
        """
        message = params.get("message", "Playbook notification")
        return {"notification_sent": True, "message": message}

    async def _action_create_case(
        self,
        params: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Create a new case (stub; a real case row in production)."""
        return {
            "case_created": True,
            "case_title": params.get("title", "Automated Case"),
            "case_id": "placeholder_id"
        }

    async def _action_isolate_host(
        self,
        params: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Isolate a host (stub).

        In production this would call Velociraptor or similar tooling
        to cut the host off the network.
        """
        return {
            "host_isolated": True,
            "host_id": params.get("host_id")
        }

    async def _action_collect_artifact(
        self,
        params: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Collect an artifact from a host (stub for a Velociraptor collection)."""
        return {
            "collection_started": True,
            "artifact": params.get("artifact_name"),
            "client_id": params.get("client_id")
        }

    async def _action_block_ip(
        self,
        params: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Block an IP address (stub; firewall-rule update in production)."""
        return {
            "ip_blocked": True,
            "ip_address": params.get("ip_address")
        }

    async def _action_send_email(
        self,
        params: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Send an email (stub; real delivery in production)."""
        return {
            "email_sent": True,
            "to": params.get("to"),
            "subject": params.get("subject")
        }
|
||||
|
||||
|
||||
def get_playbook_engine() -> PlaybookEngine:
    """
    Factory function to create playbook engine

    Returns:
        Configured PlaybookEngine instance
    """
    # Construction is cheap and stateless, so a fresh engine per call is fine.
    engine = PlaybookEngine()
    return engine
|
||||
198
backend/app/core/threat_intel.py
Normal file
198
backend/app/core/threat_intel.py
Normal file
@@ -0,0 +1,198 @@
|
||||
"""
|
||||
Threat Intelligence and Machine Learning Module
|
||||
|
||||
This module provides threat scoring, anomaly detection, and predictive analytics.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
import random # For demo purposes - would use actual ML models in production
|
||||
|
||||
|
||||
class ThreatAnalyzer:
    """Analyzes threats using ML models and heuristics.

    The current implementation is heuristic-only; real ML models would
    replace these rules in production.
    """

    def __init__(self, model_version: str = "1.0"):
        """
        Initialize threat analyzer

        Args:
            model_version: Version of ML models to use (recorded on
                every produced score).
        """
        self.model_version = model_version

    def analyze_host(self, host_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Analyze a host for threats

        Args:
            host_data: Host information and telemetry (reads the
                optional "hostname" and "host_metadata" keys)

        Returns:
            Dictionary with threat score and indicators
        """
        score = 0.0
        indicators: List[Dict[str, Any]] = []

        # Heuristic: throwaway-looking hostnames.
        lowered = host_data.get("hostname", "").lower()
        if any(keyword in lowered for keyword in ("temp", "test")):
            score += 0.2
            indicators.append({
                "type": "suspicious_hostname",
                "description": "Hostname contains suspicious keywords",
                "severity": "low"
            })

        # Heuristic: crude substring scan of the metadata blob.
        metadata = host_data.get("host_metadata", {})
        if metadata and "suspicious_process" in str(metadata):
            score += 0.5
            indicators.append({
                "type": "suspicious_process",
                "description": "Unusual process detected",
                "severity": "high"
            })

        # Clamp to [0, 1] and package with a fixed confidence of 0.8.
        return self._build_result(min(score, 1.0), 0.8, indicators)

    def analyze_artifact(self, artifact_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Analyze an artifact for threats

        Args:
            artifact_data: Artifact information (reads "artifact_type"
                and "value"; types handled: "hash", "ip", "domain")

        Returns:
            Dictionary with threat score and indicators
        """
        score = 0.0
        indicators: List[Dict[str, Any]] = []

        artifact_type = artifact_data.get("artifact_type", "")
        value = artifact_data.get("value", "")

        if artifact_type == "hash":
            # Placeholder for a threat-intel feed lookup; for now only
            # flag 32-character values (MD5 length).
            if len(value) == 32:
                score += 0.3
                indicators.append({
                    "type": "weak_hash",
                    "description": "MD5 hashes are considered weak",
                    "severity": "low"
                })
        elif artifact_type == "ip":
            if value.startswith(("10.", "192.168.")):
                score += 0.1  # Private IP, lower risk — no indicator emitted
            else:
                score += 0.4  # Public IP, higher scrutiny
                indicators.append({
                    "type": "public_ip",
                    "description": "Communication with public IP",
                    "severity": "medium"
                })
        elif artifact_type == "domain":
            # Flag TLDs commonly seen in abuse reporting.
            if value.endswith((".ru", ".cn", ".tk", ".xyz")):
                score += 0.6
                indicators.append({
                    "type": "suspicious_tld",
                    "description": "Domain uses potentially suspicious TLD",
                    "severity": "high"
                })

        # Clamp to [0, 1] and package with a fixed confidence of 0.7.
        return self._build_result(min(score, 1.0), 0.7, indicators)

    def detect_anomalies(
        self,
        historical_data: List[Dict[str, Any]],
        current_data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Detect anomalies in current data compared to historical baseline

        Simplified statistical placeholder: whenever at least three
        baseline samples exist it reports one medium behavioral anomaly.

        Args:
            historical_data: Historical baseline data
            current_data: Current data to analyze (unused by the
                placeholder implementation)

        Returns:
            Anomaly detection results ("is_anomaly", "anomaly_score",
            "anomalies")
        """
        anomalies: List[Dict[str, Any]] = []
        score = 0.0

        if historical_data and len(historical_data) >= 3:
            anomalies.append({
                "type": "behavioral_anomaly",
                "description": "Behavior deviates from baseline",
                "severity": "medium"
            })
            score = 0.5

        return {
            "is_anomaly": score > 0.4,
            "anomaly_score": score,
            "anomalies": anomalies
        }

    def _classify_threat(self, score: float) -> str:
        """Map a numeric score onto a severity label via fixed thresholds."""
        for threshold, label in (
            (0.8, "critical"),
            (0.6, "high"),
            (0.4, "medium"),
            (0.2, "low"),
        ):
            if score >= threshold:
                return label
        return "benign"

    def _build_result(
        self,
        score: float,
        confidence: float,
        indicators: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Assemble the result payload shared by both analyzers."""
        return {
            "score": score,
            "confidence": confidence,
            "threat_type": self._classify_threat(score),
            "indicators": indicators,
            "ml_model_version": self.model_version
        }
|
||||
|
||||
|
||||
def get_threat_analyzer(model_version: str = "1.0") -> ThreatAnalyzer:
    """
    Factory function to create threat analyzer

    Args:
        model_version: Version of ML models

    Returns:
        Configured ThreatAnalyzer instance
    """
    # Stateless construction — a fresh analyzer per call is intentional.
    analyzer = ThreatAnalyzer(model_version)
    return analyzer
|
||||
@@ -1,13 +1,16 @@
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
from app.api.routes import auth, users, tenants, hosts, ingestion, vt, audit, notifications, velociraptor
|
||||
from app.api.routes import (
|
||||
auth, users, tenants, hosts, ingestion, vt, audit,
|
||||
notifications, velociraptor, playbooks, threat_intel, reports
|
||||
)
|
||||
from app.core.config import settings
|
||||
|
||||
# NOTE: the diff residue here carried both the old (0.3.0) and new (1.0.0)
# metadata, which would be duplicate keyword arguments (a SyntaxError).
# Keep only the post-Phase-4 values.
app = FastAPI(
    title=settings.app_name,
    description="Multi-tenant threat hunting companion for Velociraptor with ML-powered threat detection",
    version="1.0.0"
)
|
||||
|
||||
# Configure CORS
|
||||
@@ -29,6 +32,9 @@ app.include_router(vt.router, prefix="/api/vt", tags=["VirusTotal"])
|
||||
app.include_router(audit.router, prefix="/api/audit", tags=["Audit Logs"])
|
||||
app.include_router(notifications.router, prefix="/api/notifications", tags=["Notifications"])
|
||||
app.include_router(velociraptor.router, prefix="/api/velociraptor", tags=["Velociraptor"])
|
||||
app.include_router(playbooks.router, prefix="/api/playbooks", tags=["Playbooks"])
|
||||
app.include_router(threat_intel.router, prefix="/api/threat-intel", tags=["Threat Intelligence"])
|
||||
app.include_router(reports.router, prefix="/api/reports", tags=["Reports"])
|
||||
|
||||
|
||||
@app.get("/")
async def root():
    """Root endpoint.

    Returns:
        A JSON-serializable dict with a welcome message, the current API
        version, the docs URL, and the high-level feature list.
    """
    # The diff residue duplicated the old ("0.3.0") and new ("1.0.0")
    # "version"/"docs" keys; only the post-Phase-4 payload is kept.
    return {
        "message": f"Welcome to {settings.app_name}",
        "version": "1.0.0",
        "docs": "/docs",
        "features": [
            "JWT Authentication with 2FA",
            "Multi-tenant isolation",
            "Audit logging",
            "Real-time notifications",
            "Velociraptor integration",
            "ML-powered threat detection",
            "Automated playbooks",
            "Advanced reporting"
        ]
    }
|
||||
|
||||
|
||||
|
||||
45
backend/app/models/playbook.py
Normal file
45
backend/app/models/playbook.py
Normal file
@@ -0,0 +1,45 @@
|
||||
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Boolean, Text, JSON
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from app.core.database import Base
|
||||
|
||||
|
||||
class Playbook(Base):
    """Automated-response playbook definition, scoped to a tenant.

    Holds the trigger that starts the playbook and a JSON list of action
    definitions; individual runs are recorded as PlaybookExecution rows.
    """

    __tablename__ = "playbooks"

    id = Column(Integer, primary_key=True, index=True)
    tenant_id = Column(Integer, ForeignKey("tenants.id"), nullable=False)  # owning tenant
    name = Column(String, nullable=False, index=True)
    description = Column(Text, nullable=True)
    trigger_type = Column(String, nullable=False)  # manual, scheduled, event
    trigger_config = Column(JSON, nullable=True)  # trigger-specific settings; shape depends on trigger_type
    actions = Column(JSON, nullable=False)  # List of action definitions
    is_enabled = Column(Boolean, default=True, nullable=False)  # disabled playbooks should not run
    created_by = Column(Integer, ForeignKey("users.id"), nullable=False)  # author
    # Timestamps default to timezone-aware UTC; updated_at refreshes on every UPDATE.
    created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
    updated_at = Column(DateTime, default=lambda: datetime.now(timezone.utc), onupdate=lambda: datetime.now(timezone.utc))

    # Relationships
    tenant = relationship("Tenant")
    creator = relationship("User", foreign_keys=[created_by])
    executions = relationship("PlaybookExecution", back_populates="playbook")
|
||||
|
||||
|
||||
class PlaybookExecution(Base):
    """A single run of a Playbook: lifecycle status, result, and error."""

    __tablename__ = "playbook_executions"

    id = Column(Integer, primary_key=True, index=True)
    playbook_id = Column(Integer, ForeignKey("playbooks.id"), nullable=False)
    tenant_id = Column(Integer, ForeignKey("tenants.id"), nullable=False)  # denormalized tenant scope
    status = Column(String, nullable=False)  # pending, running, completed, failed
    started_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))  # UTC
    completed_at = Column(DateTime, nullable=True)  # set once the run finishes
    result = Column(JSON, nullable=True)  # execution output payload
    error_message = Column(Text, nullable=True)  # failure details, if any
    triggered_by = Column(Integer, ForeignKey("users.id"), nullable=True)  # user who started the run, if any

    # Relationships
    playbook = relationship("Playbook", back_populates="executions")
    tenant = relationship("Tenant")
    trigger_user = relationship("User")
|
||||
43
backend/app/models/report_template.py
Normal file
43
backend/app/models/report_template.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Boolean, Text, JSON
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from app.core.database import Base
|
||||
|
||||
|
||||
class ReportTemplate(Base):
    """Reusable, tenant-scoped configuration for generating reports."""

    __tablename__ = "report_templates"

    id = Column(Integer, primary_key=True, index=True)
    tenant_id = Column(Integer, ForeignKey("tenants.id"), nullable=False)  # owning tenant
    name = Column(String, nullable=False, index=True)
    description = Column(Text, nullable=True)
    template_type = Column(String, nullable=False)  # case_summary, host_analysis, threat_report
    template_config = Column(JSON, nullable=False)  # Configuration for report generation
    is_default = Column(Boolean, default=False, nullable=False)  # marks the tenant's default template
    created_by = Column(Integer, ForeignKey("users.id"), nullable=False)
    created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))  # UTC

    # Relationships
    tenant = relationship("Tenant")
    creator = relationship("User")
|
||||
|
||||
|
||||
class Report(Base):
    """A generated report instance, optionally derived from a template."""

    __tablename__ = "reports"

    id = Column(Integer, primary_key=True, index=True)
    tenant_id = Column(Integer, ForeignKey("tenants.id"), nullable=False)  # owning tenant
    template_id = Column(Integer, ForeignKey("report_templates.id"), nullable=True)  # null for ad-hoc reports
    title = Column(String, nullable=False)
    report_type = Column(String, nullable=False)
    format = Column(String, nullable=False)  # pdf, html, json
    file_path = Column(String, nullable=True)  # location of the rendered file once generated
    status = Column(String, nullable=False)  # generating, completed, failed
    generated_by = Column(Integer, ForeignKey("users.id"), nullable=False)
    generated_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))  # UTC

    # Relationships
    tenant = relationship("Tenant")
    template = relationship("ReportTemplate")
    generator = relationship("User")
|
||||
26
backend/app/models/threat_score.py
Normal file
26
backend/app/models/threat_score.py
Normal file
@@ -0,0 +1,26 @@
|
||||
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Float, Text, JSON
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from app.core.database import Base
|
||||
|
||||
|
||||
class ThreatScore(Base):
    """ML-derived threat score, attached to a tenant and optionally a
    specific host and/or artifact."""

    __tablename__ = "threat_scores"

    id = Column(Integer, primary_key=True, index=True)
    tenant_id = Column(Integer, ForeignKey("tenants.id"), nullable=False)  # owning tenant
    host_id = Column(Integer, ForeignKey("hosts.id"), nullable=True)  # optional target host
    artifact_id = Column(Integer, ForeignKey("artifacts.id"), nullable=True)  # optional target artifact
    score = Column(Float, nullable=False, index=True)  # 0.0 to 1.0
    confidence = Column(Float, nullable=False)  # 0.0 to 1.0
    threat_type = Column(String, nullable=False)  # malware, suspicious, anomaly, etc.
    description = Column(Text, nullable=True)
    indicators = Column(JSON, nullable=True)  # List of indicators that contributed to score
    ml_model_version = Column(String, nullable=True)  # version of the model that produced this score
    created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc), index=True)  # UTC

    # Relationships
    tenant = relationship("Tenant")
    host = relationship("Host")
    artifact = relationship("Artifact")
|
||||
55
backend/app/schemas/playbook.py
Normal file
55
backend/app/schemas/playbook.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class PlaybookBase(BaseModel):
    """Fields shared by playbook create/read schemas."""
    name: str
    description: Optional[str] = None
    trigger_type: str  # e.g. manual, scheduled, event (see the ORM model)
    trigger_config: Optional[Dict[str, Any]] = None  # trigger-specific settings
    actions: List[Dict[str, Any]]  # ordered action definitions
    is_enabled: bool = True
|
||||
|
||||
|
||||
class PlaybookCreate(PlaybookBase):
    """Payload for creating a playbook; identical to the base fields."""
|
||||
|
||||
|
||||
class PlaybookUpdate(BaseModel):
    """Partial-update payload: every field optional; only supplied fields change."""
    name: Optional[str] = None
    description: Optional[str] = None
    trigger_type: Optional[str] = None
    trigger_config: Optional[Dict[str, Any]] = None
    actions: Optional[List[Dict[str, Any]]] = None
    is_enabled: Optional[bool] = None
|
||||
|
||||
|
||||
class PlaybookRead(PlaybookBase):
    """Response schema: base fields plus server-assigned metadata."""
    id: int
    tenant_id: int
    created_by: int
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow building instances directly from ORM objects (Pydantic v2).
        from_attributes = True
|
||||
|
||||
|
||||
class PlaybookExecutionRead(BaseModel):
    """Response schema for a single playbook run."""
    id: int
    playbook_id: int
    tenant_id: int
    status: str  # pending, running, completed, failed (see ORM model)
    started_at: datetime
    completed_at: Optional[datetime]  # None while the run is in progress
    result: Optional[Dict[str, Any]]
    error_message: Optional[str]

    class Config:
        # Allow building instances directly from ORM objects (Pydantic v2).
        from_attributes = True
|
||||
54
backend/app/schemas/report.py
Normal file
54
backend/app/schemas/report.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class ReportTemplateBase(BaseModel):
    """Fields shared by report-template create/read schemas."""
    name: str
    description: Optional[str] = None
    template_type: str  # e.g. case_summary, host_analysis, threat_report
    template_config: Dict[str, Any]  # configuration for report generation
    is_default: bool = False
|
||||
|
||||
|
||||
class ReportTemplateCreate(ReportTemplateBase):
    """Payload for creating a report template; identical to the base fields."""
|
||||
|
||||
|
||||
class ReportTemplateRead(ReportTemplateBase):
    """Response schema: base fields plus server-assigned metadata."""
    id: int
    tenant_id: int
    created_by: int
    created_at: datetime

    class Config:
        # Allow building instances directly from ORM objects (Pydantic v2).
        from_attributes = True
|
||||
|
||||
|
||||
class ReportBase(BaseModel):
    """Fields shared by report create/read schemas."""
    title: str
    report_type: str
    format: str  # pdf, html, json (see ORM model)
|
||||
|
||||
|
||||
class ReportCreate(ReportBase):
    """Payload for requesting report generation."""
    template_id: Optional[int] = None  # None for an ad-hoc (template-less) report
|
||||
|
||||
|
||||
class ReportRead(ReportBase):
    """Response schema for a generated report."""
    id: int
    tenant_id: int
    template_id: Optional[int]
    file_path: Optional[str]  # None until generation has produced a file
    status: str  # generating, completed, failed (see ORM model)
    generated_by: int
    generated_at: datetime

    class Config:
        # Allow building instances directly from ORM objects (Pydantic v2).
        from_attributes = True
|
||||
32
backend/app/schemas/threat_score.py
Normal file
32
backend/app/schemas/threat_score.py
Normal file
@@ -0,0 +1,32 @@
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class ThreatScoreBase(BaseModel):
    """Fields shared by threat-score create/read schemas."""
    score: float  # 0.0 to 1.0 (see ORM model)
    confidence: float  # 0.0 to 1.0
    threat_type: str  # malware, suspicious, anomaly, etc.
    description: Optional[str] = None
    indicators: Optional[List[Dict[str, Any]]] = None  # indicators contributing to the score
|
||||
|
||||
|
||||
class ThreatScoreCreate(ThreatScoreBase):
    """Payload for recording a threat score against a host and/or artifact."""
    host_id: Optional[int] = None
    artifact_id: Optional[int] = None
    ml_model_version: Optional[str] = None  # version of the model that produced the score
|
||||
|
||||
|
||||
class ThreatScoreRead(ThreatScoreBase):
    """Response schema for a stored threat score."""
    id: int
    tenant_id: int
    host_id: Optional[int]
    artifact_id: Optional[int]
    ml_model_version: Optional[str]
    created_at: datetime

    class Config:
        # Allow building instances directly from ORM objects (Pydantic v2).
        from_attributes = True
|
||||
Reference in New Issue
Block a user