Self-Analysis: AI reports from system's own data + Lab experiments API

4 one-click self-analysis reports in Lab:
1. Threat Intelligence Report — security logs → attack taxonomy,
   attacker profiling, predictive analysis, recommendations
2. Model Performance Analysis — 96 team runs → usage patterns,
   model workload, response efficiency, optimization opportunities
3. Usage Analytics — nginx access logs → traffic patterns, feature
   usage, user journey mapping, UX recommendations
4. Security Posture Assessment — combined audit of security logs,
   sentinel verdicts, fail2ban, threat intel DB → risk rating

API: POST /api/self-analyze
- type: threat_intel|model_performance|access_patterns|security_posture
- model: which local model to use (default qwen2.5)
- Returns structured report from real system data

Lab UI:
- Green-bordered Self-Analysis card above experiment templates
- Click any report → runs analysis in background → result panel
  expands inline with full report (scrollable, closeable)
- Loading state shows "Analyzing..." during generation

Each report analyzes REAL data: actual security logs, actual run
history, actual nginx access patterns — not synthetic test data.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
root 2026-03-26 04:42:07 -05:00
parent ca660cbd10
commit 28e641f939

View File

@ -3385,8 +3385,12 @@ LAB_HTML = r"""
<h3>Your Experiments <button class="btn btn-p" style="margin-left:auto" onclick="showCreate()">+ New</button></h3> <h3>Your Experiments <button class="btn btn-p" style="margin-left:auto" onclick="showCreate()">+ New</button></h3>
<div id="exp-list"><div class="empty">Loading...</div></div> <div id="exp-list"><div class="empty">Loading...</div></div>
</div> </div>
<div class="card" style="border-color:rgba(74,222,128,0.2)">
<h3 style="color:var(--green)">Self-Analysis <span style="font-size:9px;color:var(--text2);font-weight:400;text-transform:none;letter-spacing:0">AI reports from your own system data</span></h3>
<div style="display:grid;gap:8px" id="self-reports"></div>
</div>
<div class="card" id="templates-card"> <div class="card" id="templates-card">
<h3>Templates <span style="font-size:9px;color:var(--text2);font-weight:400;text-transform:none;letter-spacing:0">click to auto-fill the create form</span></h3> <h3>Experiment Templates <span style="font-size:9px;color:var(--text2);font-weight:400;text-transform:none;letter-spacing:0">click to auto-fill the create form</span></h3>
<div style="display:grid;gap:8px" id="template-list"></div> <div style="display:grid;gap:8px" id="template-list"></div>
</div> </div>
<div id="create-form" class="card" style="display:none;border-color:var(--green)"> <div id="create-form" class="card" style="display:none;border-color:var(--green)">
@ -3797,6 +3801,80 @@ var LAB_TEMPLATES = [
} }
]; ];
// SELF-ANALYSIS REPORTS
// Catalog of the four one-click self-analysis report types shown in the Lab UI.
// Each entry's `id` maps to a branch of POST /api/self-analyze on the server.
var SELF_REPORTS = [
  {
    id: 'threat_intel',
    name: 'Threat Intelligence Report',
    desc: 'Analyze security logs to identify attack patterns, profile attackers, predict next moves, and recommend defenses',
    icon: 'shield'
  },
  {
    id: 'model_performance',
    name: 'Model Performance Analysis',
    desc: 'Analyze your 96 team runs to find which models perform best at which tasks and where to optimize',
    icon: 'chart'
  },
  {
    id: 'access_patterns',
    name: 'Usage Analytics',
    desc: 'Analyze nginx logs to understand traffic patterns, feature usage, user journeys, and UX opportunities',
    icon: 'eye'
  },
  {
    id: 'security_posture',
    name: 'Security Posture Assessment',
    desc: 'Combined audit of security logs, sentinel verdicts, fail2ban status, and threat intel DB — overall risk rating',
    icon: 'lock'
  }
];
// Populate the Self-Analysis card with one clickable row per report type.
// Safe no-op when the #self-reports container is absent from the page.
function renderSelfReports() {
  var host = document.getElementById('self-reports');
  if (!host) return;
  host.textContent = '';
  for (var i = 0; i < SELF_REPORTS.length; i++) {
    // Capture the current report in a closure so the handlers below
    // reference the right entry.
    (function (report) {
      var row = document.createElement('div');
      row.style.cssText = 'background:rgba(0,0,0,0.25);border:2px solid rgba(74,222,128,0.2);border-radius:2px;padding:12px;cursor:pointer;transition:all 0.15s;display:flex;align-items:center;gap:12px';
      // Highlight the row border on hover.
      row.onmouseenter = function () { row.style.borderColor = 'var(--green)'; };
      row.onmouseleave = function () { row.style.borderColor = 'rgba(74,222,128,0.2)'; };
      row.onclick = function () { runSelfReport(report.id, row); };

      var meta = document.createElement('div');
      meta.style.flex = '1';
      var heading = document.createElement('div');
      heading.style.cssText = 'font-weight:700;font-size:12px;margin-bottom:3px';
      heading.textContent = report.name;
      var blurb = document.createElement('div');
      blurb.style.cssText = 'font-size:11px;color:var(--text2);line-height:1.4';
      blurb.textContent = report.desc;
      meta.appendChild(heading);
      meta.appendChild(blurb);
      row.appendChild(meta);

      var runLabel = document.createElement('span');
      runLabel.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:9px;text-transform:uppercase;letter-spacing:1px;color:var(--green);white-space:nowrap';
      runLabel.textContent = 'Run →';
      row.appendChild(runLabel);

      host.appendChild(row);
    })(SELF_REPORTS[i]);
  }
}
/**
 * Run one self-analysis report against POST /api/self-analyze and render the
 * result in a closeable panel inserted directly below the clicked card.
 * @param {string} type - report id (e.g. 'threat_intel'), sent to the server.
 * @param {HTMLElement} card - the clicked report card; used for loading state
 *   and as the insertion anchor for the result panel.
 */
async function runSelfReport(type, card) {
  var origBorder = card.style.borderColor;
  card.style.borderColor = 'var(--green)';
  var btn = card.querySelector('span:last-child');
  btn.textContent = 'Analyzing...';
  btn.style.color = 'var(--accent)';
  // Remove any previous result panel for this report type before re-running.
  var resultId = 'report-' + type;
  var existing = document.getElementById(resultId);
  if (existing) existing.remove();
  try {
    var r = await fetch('/api/self-analyze', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ type: type, model: 'qwen2.5:latest' })
    });
    var d = await r.json();
    if (d.error) { toast(d.error, false); return; }
    var panel = document.createElement('div');
    panel.id = resultId;
    panel.style.cssText = 'background:rgba(0,0,0,0.3);border:2px solid var(--green);border-radius:2px;padding:16px;margin-top:8px;max-height:500px;overflow-y:auto';
    var title = document.createElement('div');
    title.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:9px;text-transform:uppercase;letter-spacing:1.5px;color:var(--green);margin-bottom:8px;display:flex;justify-content:space-between';
    // FIX: the type/model concatenation used an empty-string separator
    // (a glyph lost to encoding) — restore a visible separator.
    title.textContent = type.replace(/_/g, ' ') + ' — ' + d.model;
    var closeBtn = document.createElement('span');
    closeBtn.style.cssText = 'cursor:pointer;opacity:0.6';
    // FIX: close glyph was an empty string (mojibake), leaving the close
    // control invisible and unclickable in practice.
    closeBtn.textContent = '×';
    closeBtn.onclick = function (e) { e.stopPropagation(); panel.remove(); };
    title.appendChild(closeBtn);
    panel.appendChild(title);
    var content = document.createElement('div');
    content.style.cssText = 'font-size:12px;line-height:1.7;white-space:pre-wrap;color:var(--text)';
    content.textContent = d.report;
    panel.appendChild(content);
    card.parentNode.insertBefore(panel, card.nextSibling);
    toast('Report generated', true);
  } catch (e) {
    toast('Error: ' + e.message, false);
  } finally {
    // FIX: reset loading state in `finally` — the early `return` on a server
    // error previously skipped these lines, leaving the button stuck on
    // "Analyzing..." with the wrong colors.
    btn.textContent = 'Run →';
    btn.style.color = 'var(--green)';
    card.style.borderColor = origBorder;
  }
}
renderSelfReports();
function renderTemplates() { function renderTemplates() {
var el = document.getElementById('template-list'); var el = document.getElementById('template-list');
if (!el) return; if (!el) return;
@ -5509,6 +5587,111 @@ loadRuns();
</body></html>""" </body></html>"""
@app.route("/api/self-analyze", methods=["POST"])
@admin_required
def self_analyze():
    """Run AI analysis on the system's own data. Generates reports from logs, runs, and security data.

    Request JSON:
        type: one of ``threat_intel`` | ``model_performance`` |
            ``access_patterns`` | ``security_posture`` (default ``threat_intel``).
        model: local Ollama model name to run the analysis (default
            ``qwen2.5:latest``).

    Returns:
        200 JSON ``{report, type, model, data_size}`` on success;
        400 JSON ``{"error": ...}`` for an unknown report type;
        500 JSON ``{"error": ...}`` if the model call fails.
    """
    data = request.json or {}
    report_type = data.get("type", "threat_intel")
    # NOTE(review): `model` is passed straight to Ollama unvalidated — acceptable
    # behind @admin_required, but worth an allowlist if exposure ever widens.
    model = data.get("model", "qwen2.5:latest")
    # Gather data based on report type; each branch builds `context` (raw data
    # excerpt) and `prompt` (instructions + context for the model).
    context = ""
    if report_type == "threat_intel":
        try:
            with open("/var/log/llm-team-security.log") as f:
                # Filter out internal-network (192.168.*) entries so the report
                # focuses on external traffic only.
                lines = [l.strip() for l in f.readlines() if "192.168" not in l]
            context = f"SECURITY LOG ({len(lines)} external entries, last 80):\n" + "\n".join(lines[-80:])
        except Exception:
            # Missing/unreadable log is non-fatal; the model is told there is no data.
            context = "No security log data available."
        prompt = (
            "You are a threat intelligence analyst. Analyze these server logs from a PRIVATE web application and produce a concise STRATEGIC THREAT REPORT.\n\n"
            f"{context}\n\n"
            "Sections: 1) EXECUTIVE SUMMARY 2) ATTACK TAXONOMY with counts 3) ATTACKER PROFILING 4) PREDICTIVE ANALYSIS 5) TOP 5 ACTIONABLE RECOMMENDATIONS"
        )
    elif report_type == "model_performance":
        try:
            with get_db() as conn:
                with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
                    # Summary columns only (counts/lengths), not full responses,
                    # to keep the context small.
                    cur.execute("SELECT mode, models_used, jsonb_array_length(responses) as resp_count, LENGTH(responses::text) as resp_size, LENGTH(prompt) as prompt_len FROM team_runs WHERE archived=false ORDER BY created_at DESC LIMIT 50")
                    runs = cur.fetchall()
            # Cap serialized size so the prompt fits the 8k-token context window.
            context = json.dumps([dict(r) for r in runs], default=str)[:6000]
        except Exception:
            context = "No run data available."
        prompt = (
            f"Analyze this dataset of AI orchestration runs.\n\nDATA:\n{context}\n\n"
            "Produce a MODEL PERFORMANCE REPORT: 1) USAGE PATTERNS 2) MODEL WORKLOAD 3) RESPONSE EFFICIENCY 4) OPTIMIZATION OPPORTUNITIES 5) RECOMMENDED EXPERIMENTS"
        )
    elif report_type == "access_patterns":
        try:
            with open("/var/log/nginx/access.log") as f:
                lines = f.readlines()[-200:]
            context = "NGINX ACCESS LOG (last 200 entries):\n" + "".join(lines)
        except Exception:
            context = "No access log data."
        prompt = (
            f"Analyze these web server access logs for a private AI orchestration platform.\n\n{context}\n\n"
            "Produce a USAGE ANALYTICS REPORT: 1) TRAFFIC PATTERNS (peak times, frequency) 2) FEATURE USAGE (which pages/APIs are used most) "
            "3) USER JOURNEY (typical workflow sequence) 4) PERFORMANCE INSIGHTS 5) UX RECOMMENDATIONS"
        )
    elif report_type == "security_posture":
        # Combine multiple data sources; each is best-effort and independently
        # optional, so a missing source degrades the report rather than failing it.
        sec_lines = ""
        try:
            with open("/var/log/llm-team-security.log") as f:
                # Same external-only filter as the threat_intel branch, last 40 lines.
                sec_lines = "\n".join([l.strip() for l in f.readlines() if "192.168" not in l][-40:])
        except Exception:
            pass
        sentinel_lines = ""
        try:
            with open("/var/log/llm-team-sentinel.log") as f:
                sentinel_lines = "\n".join(f.readlines()[-20:])
        except Exception:
            pass
        threat_count = 0
        try:
            with get_db() as conn:
                with conn.cursor() as cur:
                    cur.execute("SELECT COUNT(*) FROM threat_intel")
                    threat_count = cur.fetchone()[0]
        except Exception:
            pass
        import subprocess
        banned = ""
        try:
            # fail2ban-client needs root; timeout keeps a hung daemon from
            # blocking the request.
            r = subprocess.run(["fail2ban-client", "status", "llm-team-exploit"], capture_output=True, text=True, timeout=5)
            banned = r.stdout
        except Exception:
            pass
        context = f"SECURITY LOG (external, last 40):\n{sec_lines}\n\nSENTINEL LOG:\n{sentinel_lines}\n\nTHREAT INTEL DB: {threat_count} profiled IPs\n\nFAIL2BAN STATUS:\n{banned}"
        prompt = (
            f"You are auditing the security posture of a private web application.\n\n{context}\n\n"
            "Produce a SECURITY POSTURE ASSESSMENT: 1) OVERALL RISK RATING (1-10) 2) DEFENSE EFFECTIVENESS (what's working) "
            "3) GAPS AND WEAKNESSES 4) INCIDENT TIMELINE (recent events) 5) PRIORITY HARDENING STEPS"
        )
    else:
        return jsonify({"error": f"Unknown report type: {report_type}"}), 400
    # Run analysis through the local Ollama instance (non-streaming).
    try:
        cfg = load_config()
        base = cfg["providers"]["ollama"].get("base_url", "http://localhost:11434")
        resp = requests.post(f"{base}/api/generate", json={
            "model": model, "prompt": prompt, "stream": False,
            # Low temperature for factual, reproducible analysis; 8k context
            # to accommodate the log excerpts above.
            "options": {"num_ctx": 8192, "temperature": 0.2}
        }, timeout=120)
        resp.raise_for_status()
        report = resp.json()["response"]
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    return jsonify({"report": report, "type": report_type, "model": model, "data_size": len(context)})
@app.route("/api/pipelines") @app.route("/api/pipelines")
@login_required @login_required
def get_pipelines(): def get_pipelines():