AI Security Sentinel: local LLM scans logs every 5 minutes
Background thread runs qwen2.5 to analyze new security log entries:
- Aggregates new entries by IP since last scan
- Sends batch to local LLM with security analysis prompt
- LLM classifies each IP: threat level, action, attack type, reason
- Auto-bans IPs the AI recommends banning (via fail2ban)
- Logs all verdicts and bans to /var/log/llm-team-sentinel.log
- Logs AI bans to security log as AI_BAN events

API:
- /api/admin/sentinel — sentinel status, stats, recent verdicts

Threat Intel tab enhancement:
- Sentinel status card with magenta accent (distinct from threat cards)
- Shows: model, scan count, ban count, last run, interval
- Recent AI verdicts table: action, IP, attack type, reason
- Errors displayed inline

Security prompt tuning:
- Explicit rules for common attack patterns
- Low temperature (0.1) for consistent classification
- JSON-only response format for reliable parsing

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
f1bb2a92e7
commit
de4ca533dd
259
llm_team_ui.py
259
llm_team_ui.py
@ -726,8 +726,55 @@ async function loadThreats() {
|
||||
// NOTE(review): fragment of async function loadThreats() — the enclosing
// function begins above this hunk, so only comments are added here.
var r = await fetch('/api/admin/security');
var d = await r.json();
var ips = d.ips || [];

// Also fetch sentinel status.
// On network failure, substitute a stub whose json() returns {} so the
// card still renders with placeholder values (await accepts non-promises).
var sr = await fetch('/api/admin/sentinel').catch(function(){return{json:function(){return{}}}});
var sentinel = await sr.json();

// Reset the view before rebuilding it.
view.textContent = '';

// Sentinel status card (magenta accent, distinct from the threat cards)
var sentinelCard = document.createElement('div');
sentinelCard.style.cssText = 'background:rgba(0,0,0,0.3);border:2px solid rgba(217,70,239,0.3);border-radius:2px;padding:14px;margin-bottom:16px;backdrop-filter:blur(16px)';
// Header row: pulsing dot + model name.
var sHeader = document.createElement('div');
sHeader.style.cssText = 'display:flex;align-items:center;gap:8px;margin-bottom:8px';
var sDot = document.createElement('div');
sDot.style.cssText = 'width:8px;height:8px;border-radius:50%;background:#d946ef;box-shadow:0 0 8px #d946ef;animation:pulse-dot 2s ease-in-out infinite';
var sTitle = document.createElement('span');
sTitle.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:11px;text-transform:uppercase;letter-spacing:1.5px;color:#d946ef;font-weight:700';
sTitle.textContent = 'AI Sentinel — ' + (sentinel.model || '?');
sHeader.appendChild(sDot);sHeader.appendChild(sTitle);sentinelCard.appendChild(sHeader);
// One-line stats strip: scans / bans / last run / interval.
var sStats = document.createElement('div');
sStats.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:10px;color:#7a7872;display:flex;gap:16px;margin-bottom:8px';
var ss = sentinel.stats || {};
sStats.textContent = 'Scans: ' + (ss.scans||0) + ' | AI Bans: ' + (ss.bans||0) + ' | Last: ' + (ss.last_run||'not yet') + ' | Interval: ' + (sentinel.interval||300) + 's';
sentinelCard.appendChild(sStats);
// Inline error display when the sentinel's last scan failed.
if (ss.last_error) {
    var sErr = document.createElement('div');
    sErr.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:9px;color:#e05252;border-left:2px solid #e05252;padding-left:8px';
    sErr.textContent = 'Last error: ' + ss.last_error;
    sentinelCard.appendChild(sErr);
}
// Recent AI verdicts (newest first, capped at 8 rows).
var verdicts = sentinel.recent_verdicts || [];
if (verdicts.length) {
    var vTitle = document.createElement('div');
    vTitle.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:8px;text-transform:uppercase;letter-spacing:2px;color:#d946ef;margin:10px 0 6px;opacity:0.6';
    vTitle.textContent = 'Recent AI Verdicts';
    sentinelCard.appendChild(vTitle);
    verdicts.slice(0,8).forEach(function(v){
        var vLine = document.createElement('div');
        vLine.style.cssText = 'font-family:JetBrains Mono,monospace;font-size:10px;color:#7a7872;padding:3px 0;border-bottom:1px solid rgba(42,45,53,0.3);display:flex;gap:8px';
        // Color-code the action column: ban=red, monitor=amber, else grey.
        var actionColor = v.action === 'ban' ? '#e05252' : v.action === 'monitor' ? '#f59e0b' : '#7a7872';
        // All dynamic values pass through esc() before innerHTML insertion.
        vLine.innerHTML = '<span style="color:'+actionColor+';min-width:50px;font-weight:700">'+esc(v.action||'?').toUpperCase()+'</span>'
            + '<span style="min-width:120px">'+esc(v.ip||'?')+'</span>'
            + '<span style="color:#c084fc">'+esc(v.attack_type||'?')+'</span>'
            + '<span style="flex:1;opacity:0.6">'+esc(v.reason||'')+'</span>';
        sentinelCard.appendChild(vLine);
    });
}
view.appendChild(sentinelCard);

// Summary stats
var summary = document.createElement('div');
summary.className = 'threat-summary';
@ -5494,6 +5541,218 @@ def run_extract(config):
|
||||
_save_pipeline("extract", prompt or source, steps, result_data, all_models, start)
|
||||
|
||||
|
||||
# ─── AI SECURITY SENTINEL ─────────────────────────────────────
# Background AI log analyst: a daemon thread periodically feeds new
# security-log entries to a local Ollama model and acts on its verdicts.

SENTINEL_LOG = "/var/log/llm-team-sentinel.log"  # sentinel's own audit log
SENTINEL_MODEL = "qwen2.5:latest"  # local Ollama model used for classification
SENTINEL_INTERVAL = 300  # 5 minutes between scans
_sentinel_last_pos = 0  # byte offset into the security log reached by the last scan
_sentinel_results = []  # last 50 analyses (dicts: ip/threat/action/reason/attack_type/time)
_sentinel_stats = {"scans": 0, "bans": 0, "last_run": None, "last_error": None}
# NOTE(review): these module globals are mutated from the sentinel thread and
# read by Flask request threads without a lock — presumably acceptable for
# status display only; confirm no stronger consistency is expected.
|
||||
|
||||
def _sentinel_log_entry(msg):
    """Append a timestamped line to the sentinel log file (best-effort).

    Any failure (missing file, permissions on /var/log, ...) is deliberately
    swallowed so that logging can never break the sentinel loop itself.
    """
    try:
        stamp = time.strftime("%Y-%m-%d %H:%M:%S")
        with open(SENTINEL_LOG, "a") as fh:
            fh.write(f"{stamp} {msg}\n")
    except Exception:
        pass
|
||||
|
||||
def _sentinel_scan():
    """Analyze new security-log entries with the local LLM and act on verdicts.

    One scan pass:
      1. Tail /var/log/llm-team-security.log from where the last scan stopped
         (resetting the offset when the log rotates).
      2. Group new events by external IP, skipping internal addresses and IPs
         fail2ban has already banned.
      3. Send up to 15 per-IP summaries to SENTINEL_MODEL for JSON-only
         classification (low temperature for consistency).
      4. Ban, via fail2ban, every IP the model flags with action "ban" —
         but only if that IP was actually in the batch we submitted, so a
         hallucinated IP from the model can never trigger a ban.
      5. Record all verdicts in _sentinel_results and log to SENTINEL_LOG.

    Mutates _sentinel_stats / _sentinel_results / _sentinel_last_pos in
    place; never raises (errors land in _sentinel_stats["last_error"]).
    """
    global _sentinel_last_pos
    import subprocess, collections, ipaddress

    def _is_internal(ip):
        # True for loopback, RFC1918, link-local, reserved, or unparseable
        # addresses — none of these may ever be analyzed or banned.
        # (Broader than the previous 192.168.* prefix check, which missed
        # 10/8, 172.16/12 and 127.0.0.1.)
        try:
            return not ipaddress.ip_address(ip).is_global
        except ValueError:
            return True

    _sentinel_stats["last_run"] = time.strftime("%Y-%m-%d %H:%M:%S")
    _sentinel_stats["scans"] += 1

    # --- 1. Read new lines since the last scan ---
    try:
        with open("/var/log/llm-team-security.log") as f:
            f.seek(0, 2)  # end of file
            file_size = f.tell()
            if _sentinel_last_pos > file_size:
                _sentinel_last_pos = 0  # log rotated; start from the top
            f.seek(_sentinel_last_pos)
            new_lines = f.readlines()
            _sentinel_last_pos = f.tell()
    except Exception as e:
        _sentinel_stats["last_error"] = str(e)
        return

    if not new_lines:
        _sentinel_log_entry("SCAN_COMPLETE new_lines=0 action=none")
        return

    # --- 2a. Aggregate events by external IP (lines carry an "ip=x.x.x.x" token) ---
    ip_activity = collections.defaultdict(list)
    for line in new_lines:
        line = line.strip()
        if not line:
            continue
        ip = None
        for token in line.split():
            if token.startswith("ip="):
                ip = token[3:]
                break
        if ip and not _is_internal(ip):
            ip_activity[ip].append(line)

    if not ip_activity:
        _sentinel_log_entry(f"SCAN_COMPLETE new_lines={len(new_lines)} external_ips=0 action=none")
        return

    # --- 2b. Skip IPs fail2ban has already banned (best-effort query) ---
    banned = set()
    try:
        for jail in ["llm-team-exploit", "llm-team-login"]:
            result = subprocess.run(["fail2ban-client", "status", jail],
                                    capture_output=True, text=True, timeout=5)
            for line in result.stdout.split("\n"):
                if "Banned IP list" in line:
                    for ip in line.split(":", 1)[1].strip().split():
                        banned.add(ip.strip())
    except Exception:
        # fail2ban unavailable: analyze everything; ban attempts may fail later.
        pass

    # --- 3. Build per-IP summaries for the analysis prompt ---
    analysis_items = []
    for ip, lines in ip_activity.items():
        if ip in banned:
            continue
        summary = f"IP {ip} ({len(lines)} events):\n"
        for l in lines[:8]:  # cap at 8 lines per IP to bound prompt size
            summary += f" {l}\n"
        analysis_items.append((ip, summary, lines))

    if not analysis_items:
        _sentinel_log_entry(f"SCAN_COMPLETE new_lines={len(new_lines)} all_banned_or_lan action=none")
        return

    # Batch analysis prompt: explicit rules for common attack patterns and a
    # strict JSON-only response format for reliable parsing.
    prompt = (
        "You are a web application security analyst. Analyze these log entries from the last 5 minutes "
        "and classify each IP. Respond with ONLY a JSON array, one object per IP:\n"
        '[{"ip": "x.x.x.x", "threat": "none|low|medium|high|critical", "action": "ignore|monitor|ban", '
        '"reason": "brief reason", "attack_type": "scanner|bruteforce|exploit|bot|legitimate"}]\n\n'
        "Guidelines:\n"
        "- /.git/config, /wp-admin, /phpmyadmin, /xmlrpc.php, /env, /admin.php = exploit scanner → ban\n"
        "- Multiple different user agents from same IP = rotating scanner → ban\n"
        "- /robots.txt or /favicon.ico alone = harmless bot → ignore\n"
        "- Failed logins = bruteforce if >2 attempts → ban\n"
        "- Headless chrome, bot UAs doing probing = automated scanner → ban\n"
        "- Single 404 on a common path = probably harmless → ignore\n\n"
        "Log entries:\n\n"
    )
    submitted = set()  # IPs actually sent to the model this scan
    for ip, summary, _ in analysis_items[:15]:  # max 15 IPs per scan
        prompt += summary + "\n"
        submitted.add(ip)

    # --- Query the local Ollama model ---
    try:
        cfg = load_config()
        base = cfg["providers"]["ollama"].get("base_url", "http://localhost:11434")
        resp = requests.post(f"{base}/api/generate", json={
            "model": SENTINEL_MODEL, "prompt": prompt, "stream": False,
            # Low temperature → consistent classification across scans.
            "options": {"num_ctx": 4096, "temperature": 0.1}
        }, timeout=60)
        resp.raise_for_status()
        ai_response = resp.json()["response"]
    except Exception as e:
        _sentinel_stats["last_error"] = f"AI query failed: {e}"
        _sentinel_log_entry(f"AI_ERROR error={e}")
        return

    # --- Parse the AI response (tolerate markdown code fences) ---
    try:
        text = ai_response.strip()
        if "```" in text:
            text = text.split("```")[1]
            if text.startswith("json"):
                text = text[4:]
        # Keep only the outermost JSON array.
        start_idx = text.find("[")
        end_idx = text.rfind("]") + 1
        if start_idx >= 0 and end_idx > start_idx:
            text = text[start_idx:end_idx]
        verdicts = json.loads(text)
    except Exception as e:
        _sentinel_stats["last_error"] = f"Parse failed: {e}"
        _sentinel_log_entry(f"PARSE_ERROR response={ai_response[:200]}")
        return

    # --- Execute actions ---
    ban_count = 0
    for v in verdicts:
        if not isinstance(v, dict):
            continue  # malformed entry from the model; skip rather than crash
        ip = v.get("ip", "")
        action = v.get("action", "ignore")
        threat = v.get("threat", "low")
        reason = v.get("reason", "")
        attack_type = v.get("attack_type", "unknown")

        result_entry = {
            "ip": ip, "threat": threat, "action": action,
            "reason": reason, "attack_type": attack_type,
            "time": time.strftime("%Y-%m-%d %H:%M:%S")
        }
        _sentinel_results.append(result_entry)
        if len(_sentinel_results) > 50:
            _sentinel_results.pop(0)

        # Ban only IPs that were actually in the submitted batch (guards
        # against hallucinated IPs) and are not internal addresses.
        if action == "ban" and ip in submitted and not _is_internal(ip):
            try:
                subprocess.run(["fail2ban-client", "set", "llm-team-exploit", "banip", ip],
                               capture_output=True, text=True, timeout=5)
                ban_count += 1
                sec_log.warning("AI_BAN ip=%s threat=%s reason=%s attack=%s", ip, threat, reason, attack_type)
                _sentinel_log_entry(f"AI_BAN ip={ip} threat={threat} reason={reason} attack_type={attack_type}")
            except Exception as e:
                _sentinel_log_entry(f"BAN_FAILED ip={ip} error={e}")
        else:
            _sentinel_log_entry(f"AI_VERDICT ip={ip} threat={threat} action={action} reason={reason} attack_type={attack_type}")

    _sentinel_stats["bans"] += ban_count
    _sentinel_stats["last_error"] = None  # a successful scan clears stale errors
    _sentinel_log_entry(f"SCAN_COMPLETE new_lines={len(new_lines)} ips_analyzed={len(analysis_items)} verdicts={len(verdicts)} bans={ban_count}")
|
||||
|
||||
|
||||
def _sentinel_loop():
    """Run _sentinel_scan() forever, once every SENTINEL_INTERVAL seconds."""
    global _sentinel_last_pos
    # Seek to the current end of the security log so only entries written
    # after startup are ever analyzed.
    try:
        with open("/var/log/llm-team-security.log") as fh:
            fh.seek(0, 2)
            _sentinel_last_pos = fh.tell()
    except Exception:
        pass  # log may not exist yet; the first scan will report the error

    _sentinel_log_entry(f"SENTINEL_START model={SENTINEL_MODEL} interval={SENTINEL_INTERVAL}s")
    while True:
        # Sleep first: the initial scan happens one interval after startup.
        time.sleep(SENTINEL_INTERVAL)
        try:
            _sentinel_scan()
        except Exception as exc:
            # Never let one bad scan kill the daemon thread.
            _sentinel_stats["last_error"] = str(exc)
            _sentinel_log_entry(f"SENTINEL_ERROR {exc}")
|
||||
|
||||
|
||||
# API for sentinel status
@app.route("/api/admin/sentinel")
@admin_required
def admin_sentinel_status():
    """Return sentinel config, counters, and the 20 newest AI verdicts.

    Verdicts are returned newest-first for direct display in the UI.
    """
    recent = list(reversed(_sentinel_results[-20:]))
    payload = {
        "stats": _sentinel_stats,
        "recent_verdicts": recent,
        "model": SENTINEL_MODEL,
        "interval": SENTINEL_INTERVAL,
    }
    return jsonify(payload)
|
||||
|
||||
|
||||
# Start sentinel thread
# Daemon thread: it dies with the main process, so no shutdown handling
# is needed.
# NOTE(review): the thread is started at module import time — confirm this
# module is imported exactly once (e.g. not re-imported by a WSGI reloader).
_sentinel_thread = threading.Thread(target=_sentinel_loop, daemon=True)
_sentinel_thread.start()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Dev entry point: print a startup banner, then serve on loopback only.
    # threaded=True lets request handlers coexist with the sentinel thread.
    print("\n LLM Team UI running at http://localhost:5000\n")
    print(f" AI Sentinel active: {SENTINEL_MODEL} scanning every {SENTINEL_INTERVAL}s\n")
    app.run(host="127.0.0.1", port=5000, debug=False, threaded=True)
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user