commit a304895249 Add bug status tracking with API and UI
Implements full bug lifecycle management (open → in_progress → resolved):

Bug Watcher (testing/oversight/bug_watcher.py):
- Add BugStatus enum with open/in_progress/resolved states
- Add SQLite persistence with status tracking and indexes
- New methods: update_bug_status(), get_bug(), log_bug()
- Extended CLI: update, get, log commands with filters

API Endpoints (ui/server.ts):
- GET /api/bugs - List bugs with status/severity/phase filters
- GET /api/bugs/summary - Bug statistics by status and severity
- GET /api/bugs/:id - Single bug details
- POST /api/bugs - Log new bug
- PATCH /api/bugs/:id - Update bug status

UI Dashboard:
- New "Bugs" tab with summary cards (Total/Open/In Progress/Resolved)
- Filter dropdowns for status and severity
- Bug list with status badges and severity indicators
- Detail panel with action buttons for status transitions
- WebSocket broadcasts for real-time updates

CLI Wrapper (bin/bugs):
- bugs list [--status X] [--severity Y]
- bugs get <id>
- bugs log -m "message" [--severity high]
- bugs update <id> <status> [--notes "..."]
- bugs status

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-24 17:17:43 -05:00

1067 lines
39 KiB
Python

"""
Bug Window Watcher
==================
Real-time monitoring of every pipeline stage with anomaly detection.
Features:
- Monitors all stages in real-time
- Surfaces anomalies, regressions, unhandled errors
- Links findings to phase, directory, STATUS, and checkpoint entries
"""
import json
import time
import hashlib
import sqlite3
import subprocess
from datetime import datetime, timezone, timedelta
from dataclasses import dataclass, field, asdict
from enum import Enum
from pathlib import Path
from typing import Any, Optional
import redis
class AnomalyType(str, Enum):
    """Types of anomalies the watcher can detect.

    Inherits from ``str`` so members compare equal to, and serialize as,
    their plain string values (used directly in SQLite rows and JSON).
    """
    UNHANDLED_ERROR = "unhandled_error"
    REGRESSION = "regression"
    PERFORMANCE_DEGRADATION = "performance_degradation"
    MISSING_ARTIFACT = "missing_artifact"          # expected file/config absent
    STATE_INCONSISTENCY = "state_inconsistency"    # e.g. BLOCKED or stale STATUS.md
    HEALTH_CHECK_FAILURE = "health_check_failure"
    DEPENDENCY_UNAVAILABLE = "dependency_unavailable"  # Vault/Redis/ledger down
    TIMEOUT = "timeout"
    UNEXPECTED_OUTPUT = "unexpected_output"
    SECURITY_VIOLATION = "security_violation"      # sourced from ledger violations
class Severity(str, Enum):
    """Severity levels for anomalies (str-valued for direct persistence)."""
    CRITICAL = "critical"  # System compromised, immediate action
    HIGH = "high"          # Major functionality impacted
    MEDIUM = "medium"      # Degraded but functional
    LOW = "low"            # Minor issue, informational
    INFO = "info"          # Tracking only
class BugStatus(str, Enum):
    """Status tracking for bugs/anomalies (lifecycle: open -> in_progress -> resolved)."""
    OPEN = "open"                # Newly detected, not yet addressed
    IN_PROGRESS = "in_progress"  # Being worked on
    RESOLVED = "resolved"        # Fixed and verified
@dataclass
class Anomaly:
    """Represents a detected anomaly (a "bug" in the tracking API/CLI/UI)."""
    id: str                       # deterministic "anom-<hash12>" id; auto-filled when ""
    type: AnomalyType
    severity: Severity
    phase: int                    # pipeline phase number (see BugWindowWatcher.PHASES)
    phase_name: str
    directory: str                # directory the finding is linked to
    message: str
    details: dict = field(default_factory=dict)
    stack_trace: Optional[str] = None
    checkpoint_id: Optional[str] = None
    status_file: Optional[str] = None   # path of the STATUS.md involved, if any
    detected_at: str = ""         # ISO-8601 UTC timestamp; auto-filled when empty
    # Status tracking
    status: BugStatus = BugStatus.OPEN
    resolved: bool = False  # Kept for backwards compatibility
    resolution_notes: Optional[str] = None
    assigned_to: Optional[str] = None
    updated_at: Optional[str] = None

    def __post_init__(self):
        """Fill defaults and keep `resolved` and `status` mutually consistent."""
        if not self.detected_at:
            self.detected_at = datetime.now(timezone.utc).isoformat()
        if not self.id:
            # Deterministic id derived from the anomaly's identifying fields,
            # so re-detections of the same condition collide in storage.
            self.id = f"anom-{hashlib.sha256(f'{self.type}{self.phase}{self.message}{self.detected_at}'.encode()).hexdigest()[:12]}"
        # Sync resolved with status for backwards compatibility
        if self.resolved and self.status == BugStatus.OPEN:
            self.status = BugStatus.RESOLVED
        elif self.status == BugStatus.RESOLVED:
            self.resolved = True
@dataclass
class WatcherState:
    """Current state of the bug watcher (mirrored to Redis by start/stop)."""
    active: bool = False
    started_at: Optional[str] = None      # ISO-8601 UTC; set by start()
    anomalies_detected: int = 0           # count from the most recent full scan
    phases_watched: list = field(default_factory=list)
    last_scan_at: Optional[str] = None
    error_count: int = 0                  # internal errors swallowed during scans
class BugWindowWatcher:
    """
    Real-time anomaly detection across all pipeline stages.
    Monitors:
    - Phase transitions and state changes
    - Error logs and stack traces
    - Performance metrics and timeouts
    - Dependency availability
    - Artifact integrity
    - Security boundaries

    Findings are persisted to SQLite (durable) and mirrored to Redis
    (real-time), and surfaced through the CLI at the bottom of this module.
    """
    # Phase definitions: phase number -> human-readable name.
    PHASES = {
        1: "Foundation (Vault + Basic Infrastructure)",
        2: "Vault Policy Engine",
        3: "Execution Pipeline",
        4: "Promotion and Revocation Engine",
        5: "Agent Bootstrapping",
        6: "Pipeline DSL, Agent Templates, Testing Framework",
        7: "Hierarchical Teams & Learning System",
        8: "Production Hardening",
        9: "External Integrations",
        10: "Multi-Tenant Support",
        11: "Agent Marketplace",
        12: "Observability",
    }
    # Phase -> key directories mapping (relative to base_path); used by
    # scan_phase to decide which STATUS.md files to inspect.
    PHASE_DIRECTORIES = {
        1: ["ledger", "bin"],
        2: ["runtime"],
        3: ["preflight", "wrappers", "evidence"],
        4: ["runtime"],
        5: ["agents", "checkpoint", "orchestrator"],
        6: ["pipeline", "tests"],
        7: ["teams", "analytics", "memory"],
        8: ["runtime", "testing/oversight"],
        9: ["integrations"],
        10: ["teams"],
        11: ["agents"],
        12: ["analytics", "ui"],
    }
def __init__(self, base_path: str = "/opt/agent-governance"):
    """Create a watcher rooted at *base_path* and prepare its backends.

    Resolves the well-known paths under the governance root, then eagerly
    connects to Redis (best-effort) and ensures the SQLite schema exists.
    """
    root = Path(base_path)
    self.base_path = root
    self.ledger_db = root / "ledger" / "governance.db"
    self.bug_db = root / "testing" / "oversight" / "bug_watcher.db"
    self.checkpoint_dir = root / "checkpoint" / "storage"
    # In-memory tracking for this process.
    self.state = WatcherState()
    self.anomalies: list[Anomaly] = []
    # Backends: Redis may legitimately end up as None (see _setup_redis).
    self._redis: Optional[redis.Redis] = None
    self._setup_redis()
    self._setup_bug_db()
def _setup_redis(self):
    """Best-effort connection to DragonflyDB; leaves self._redis as None on failure."""
    client = None
    try:
        candidate = redis.Redis(
            host='127.0.0.1',
            port=6379,
            password='governance2026',
            decode_responses=True,
        )
        # Only keep the client if the server actually answers.
        candidate.ping()
        client = candidate
    except Exception:
        client = None
    self._redis = client
def _setup_bug_db(self):
"""Initialize SQLite database for bug tracking"""
conn = sqlite3.connect(self.bug_db)
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS bugs (
id TEXT PRIMARY KEY,
type TEXT NOT NULL,
severity TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'open',
phase INTEGER NOT NULL,
phase_name TEXT NOT NULL,
directory TEXT NOT NULL,
message TEXT NOT NULL,
details TEXT,
stack_trace TEXT,
checkpoint_id TEXT,
status_file TEXT,
detected_at TEXT NOT NULL,
updated_at TEXT,
resolved_at TEXT,
resolution_notes TEXT,
assigned_to TEXT
)
""")
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_bugs_status ON bugs(status)
""")
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_bugs_severity ON bugs(severity)
""")
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_bugs_phase ON bugs(phase)
""")
conn.commit()
conn.close()
def _now(self) -> str:
return datetime.now(timezone.utc).isoformat()
def start(self) -> WatcherState:
"""Start the bug watcher"""
self.state.active = True
self.state.started_at = self._now()
self.state.phases_watched = list(self.PHASES.keys())
if self._redis:
self._redis.hset("oversight:watcher", mapping={
"active": "true",
"started_at": self.state.started_at,
"phases": json.dumps(self.state.phases_watched)
})
return self.state
def stop(self) -> WatcherState:
"""Stop the bug watcher"""
self.state.active = False
if self._redis:
self._redis.hset("oversight:watcher", "active", "false")
return self.state
def scan_all_phases(self) -> list[Anomaly]:
"""Scan all phases for anomalies"""
all_anomalies = []
for phase_num in self.PHASES:
anomalies = self.scan_phase(phase_num)
all_anomalies.extend(anomalies)
self.state.last_scan_at = self._now()
self.state.anomalies_detected = len(all_anomalies)
return all_anomalies
def scan_phase(self, phase_num: int) -> list[Anomaly]:
    """Scan a specific phase for anomalies.

    Runs five independent detectors (STATUS.md files, ledger errors,
    dependency health, checkpoint consistency, phase-specific checks),
    accumulates their findings, persists everything, and returns the
    anomalies found by this call only.
    """
    anomalies = []
    phase_name = self.PHASES.get(phase_num, f"Phase {phase_num}")
    directories = self.PHASE_DIRECTORIES.get(phase_num, [])
    # 1. Check STATUS.md files for issues (only for directories that exist)
    for dir_name in directories:
        dir_path = self.base_path / dir_name
        if dir_path.exists():
            status_anomalies = self._check_status_file(dir_path, phase_num, phase_name)
            anomalies.extend(status_anomalies)
    # 2. Check for recent errors in ledger
    ledger_anomalies = self._check_ledger_errors(phase_num, phase_name)
    anomalies.extend(ledger_anomalies)
    # 3. Check dependency health
    dep_anomalies = self._check_dependencies(phase_num, phase_name)
    anomalies.extend(dep_anomalies)
    # 4. Check checkpoint consistency
    ckpt_anomalies = self._check_checkpoint_consistency(phase_num, phase_name)
    anomalies.extend(ckpt_anomalies)
    # 5. Phase-specific checks
    specific_anomalies = self._run_phase_specific_checks(phase_num, phase_name)
    anomalies.extend(specific_anomalies)
    # Store anomalies: in-memory list plus SQLite/Redis persistence.
    self.anomalies.extend(anomalies)
    self._persist_anomalies(anomalies)
    return anomalies
def _check_status_file(self, dir_path: Path, phase_num: int, phase_name: str) -> list[Anomaly]:
    """Inspect a directory's STATUS.md for missing/blocked/stale conditions.

    Three checks, each producing at most one anomaly:
    - file missing                         -> LOW missing_artifact
    - content contains "BLOCKED"           -> HIGH state_inconsistency
    - "Last updated:"/"last_updated" date
      older than 7 days                    -> LOW state_inconsistency
    Read errors are reported as a MEDIUM unhandled_error rather than raised.
    """
    import re  # hoisted out of the per-line loop (was re-imported each iteration)

    anomalies = []
    status_file = dir_path / "STATUS.md"
    if not status_file.exists():
        anomalies.append(Anomaly(
            id="",
            type=AnomalyType.MISSING_ARTIFACT,
            severity=Severity.LOW,
            phase=phase_num,
            phase_name=phase_name,
            directory=str(dir_path.relative_to(self.base_path)),
            message=f"Missing STATUS.md in {dir_path.name}",
            status_file=None
        ))
        return anomalies
    try:
        content = status_file.read_text()
        # Check for blocked status.
        # FIX: the original condition was `... or "" in content`, which is
        # always true (empty string is a substring of everything), so every
        # directory with a STATUS.md was flagged BLOCKED. The second operand
        # was presumably a glyph lost in extraction; the "BLOCKED" text check
        # alone is the reliable signal.
        if "BLOCKED" in content.upper():
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.STATE_INCONSISTENCY,
                severity=Severity.HIGH,
                phase=phase_num,
                phase_name=phase_name,
                directory=str(dir_path.relative_to(self.base_path)),
                message=f"Directory {dir_path.name} is BLOCKED",
                status_file=str(status_file),
                details={"content_preview": content[:500]}
            ))
        # Check for stale status (not updated in 7 days).
        # FIX: the outer guard only matched "Last updated:" although the
        # per-line scan also accepted "last_updated" — the lowercase marker
        # was unreachable. Guard on either form.
        if "Last updated:" in content or "last_updated" in content.lower():
            try:
                for line in content.split('\n'):
                    if 'Last updated:' in line or 'last_updated' in line.lower():
                        # Extract the first YYYY-MM-DD date on the marker line.
                        date_match = re.search(r'(\d{4}-\d{2}-\d{2})', line)
                        if date_match:
                            last_update = datetime.fromisoformat(date_match.group(1))
                            # Naive-vs-naive comparison: both sides are local/naive.
                            if datetime.now() - last_update > timedelta(days=7):
                                anomalies.append(Anomaly(
                                    id="",
                                    type=AnomalyType.STATE_INCONSISTENCY,
                                    severity=Severity.LOW,
                                    phase=phase_num,
                                    phase_name=phase_name,
                                    directory=str(dir_path.relative_to(self.base_path)),
                                    message=f"Stale STATUS.md - last updated {date_match.group(1)}",
                                    status_file=str(status_file)
                                ))
                        break  # only the first marker line is considered
            except Exception:
                pass  # date parsing is best-effort; never fail the scan on it
    except Exception as e:
        anomalies.append(Anomaly(
            id="",
            type=AnomalyType.UNHANDLED_ERROR,
            severity=Severity.MEDIUM,
            phase=phase_num,
            phase_name=phase_name,
            directory=str(dir_path.relative_to(self.base_path)),
            message=f"Error reading STATUS.md: {e}",
            status_file=str(status_file)
        ))
    return anomalies
def _check_ledger_errors(self, phase_num: int, phase_name: str) -> list[Anomaly]:
    """Surface unacknowledged critical/high violations from the governance ledger.

    Reads the `violations` table of the ledger DB (newest 10) and maps each
    row to a SECURITY_VIOLATION anomaly. Any DB error (missing table, locked
    file, schema drift) increments state.error_count and yields no anomalies.
    """
    anomalies = []
    if not self.ledger_db.exists():
        return anomalies
    conn = None
    try:
        conn = sqlite3.connect(self.ledger_db)
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        # Only unacknowledged, serious violations are surfaced.
        cursor.execute("""
            SELECT * FROM violations
            WHERE severity IN ('critical', 'high')
            AND acknowledged = 0
            ORDER BY timestamp DESC LIMIT 10
        """)
        for row in cursor.fetchall():
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.SECURITY_VIOLATION,
                severity=Severity.CRITICAL if row['severity'] == 'critical' else Severity.HIGH,
                phase=phase_num,
                phase_name=phase_name,
                directory="ledger",
                message=f"Unacknowledged {row['severity']} violation: {row['violation_type']}",
                details={
                    "violation_id": row['id'],
                    "agent_id": row['agent_id'],
                    "description": row['description'],
                    "timestamp": row['timestamp']
                }
            ))
    except Exception:
        # Deliberate best-effort: a broken ledger must not break the scan.
        self.state.error_count += 1
    finally:
        # FIX: the original leaked the connection when the query raised
        # (conn.close() was only on the success path).
        if conn is not None:
            conn.close()
    return anomalies
def _check_dependencies(self, phase_num: int, phase_name: str) -> list[Anomaly]:
    """Check dependency availability: Vault, DragonflyDB, and the ledger DB.

    Each unavailable dependency yields one anomaly; the checks are
    independent, so multiple anomalies can be returned from one call.
    """
    anomalies = []
    # Check Vault (via docker exec; 5s timeout keeps scans bounded)
    try:
        result = subprocess.run(
            ["docker", "exec", "vault", "vault", "status", "-format=json"],
            capture_output=True, text=True, timeout=5
        )
        # NOTE(review): a non-zero exit is treated as "unavailable or sealed" —
        # confirm the vault CLI's exit-code semantics match this assumption.
        if result.returncode != 0:
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.DEPENDENCY_UNAVAILABLE,
                severity=Severity.CRITICAL,
                phase=phase_num,
                phase_name=phase_name,
                directory="infrastructure",
                message="Vault is unavailable or sealed",
                details={"stderr": result.stderr[:500] if result.stderr else ""}
            ))
    except Exception as e:
        # docker missing, container absent, or the 5s timeout fired
        anomalies.append(Anomaly(
            id="",
            type=AnomalyType.DEPENDENCY_UNAVAILABLE,
            severity=Severity.CRITICAL,
            phase=phase_num,
            phase_name=phase_name,
            directory="infrastructure",
            message=f"Cannot check Vault status: {e}"
        ))
    # Check DragonflyDB (only if a client was established at startup)
    if self._redis:
        try:
            self._redis.ping()
        except Exception:
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.DEPENDENCY_UNAVAILABLE,
                severity=Severity.HIGH,
                phase=phase_num,
                phase_name=phase_name,
                directory="infrastructure",
                message="DragonflyDB is unavailable"
            ))
    # Check Ledger DB (existence only; contents are checked elsewhere)
    if not self.ledger_db.exists():
        anomalies.append(Anomaly(
            id="",
            type=AnomalyType.DEPENDENCY_UNAVAILABLE,
            severity=Severity.HIGH,
            phase=phase_num,
            phase_name=phase_name,
            directory="ledger",
            message="Governance ledger database not found"
        ))
    return anomalies
def _check_checkpoint_consistency(self, phase_num: int, phase_name: str) -> list[Anomaly]:
    """Check checkpoint data for consistency issues.

    Looks at ckpt-*.json files under checkpoint_dir: flags the total absence
    of checkpoints (MEDIUM), a latest checkpoint older than 1 hour (LOW),
    and any read/parse failure (MEDIUM). A missing checkpoint directory is
    treated as "nothing to check" rather than an anomaly.
    """
    anomalies = []
    if not self.checkpoint_dir.exists():
        return anomalies
    # reverse-sorted by name; assumes ckpt-* names sort chronologically —
    # TODO confirm the naming scheme guarantees this.
    checkpoints = sorted(self.checkpoint_dir.glob("ckpt-*.json"), reverse=True)
    if not checkpoints:
        anomalies.append(Anomaly(
            id="",
            type=AnomalyType.MISSING_ARTIFACT,
            severity=Severity.MEDIUM,
            phase=phase_num,
            phase_name=phase_name,
            directory="checkpoint",
            message="No checkpoints found"
        ))
        return anomalies
    # Check latest checkpoint
    try:
        latest = json.loads(checkpoints[0].read_text())
        # Verify content hash
        if 'content_hash' in latest:
            # Could verify hash here
            pass
        # Check for stale checkpoint (older than 1 hour)
        if 'created_at' in latest:
            # Normalize trailing 'Z' so fromisoformat yields an aware datetime.
            created = datetime.fromisoformat(latest['created_at'].replace('Z', '+00:00'))
            if datetime.now(timezone.utc) - created > timedelta(hours=1):
                anomalies.append(Anomaly(
                    id="",
                    type=AnomalyType.STATE_INCONSISTENCY,
                    severity=Severity.LOW,
                    phase=phase_num,
                    phase_name=phase_name,
                    directory="checkpoint",
                    message=f"Last checkpoint is stale: {latest['created_at']}",
                    checkpoint_id=latest.get('checkpoint_id')
                ))
    except Exception as e:
        anomalies.append(Anomaly(
            id="",
            type=AnomalyType.UNHANDLED_ERROR,
            severity=Severity.MEDIUM,
            phase=phase_num,
            phase_name=phase_name,
            directory="checkpoint",
            message=f"Error reading checkpoint: {e}"
        ))
    return anomalies
def _run_phase_specific_checks(self, phase_num: int, phase_name: str) -> list[Anomaly]:
    """Run checks specific to each phase.

    Only phases 3, 4, 5 and 8 have dedicated checks; all other phases
    return an empty list.
    """
    anomalies = []
    if phase_num == 3:  # Execution Pipeline
        # Check preflight module
        preflight_path = self.base_path / "preflight" / "preflight.py"
        if not preflight_path.exists():
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.MISSING_ARTIFACT,
                severity=Severity.HIGH,
                phase=phase_num,
                phase_name=phase_name,
                directory="preflight",
                message="Preflight module missing"
            ))
    elif phase_num == 4:  # Promotion/Revocation
        # Check for agents with high violation counts (via Redis error hashes)
        if self._redis:
            try:
                keys = self._redis.keys("agent:*:errors")
                for key in keys[:10]:  # Limit check to bound scan time
                    errors = self._redis.hgetall(key)
                    total = int(errors.get('total_errors', 0))
                    if total > 5:
                        # key layout is "agent:<id>:errors", so index 1 is the id
                        agent_id = key.split(':')[1]
                        anomalies.append(Anomaly(
                            id="",
                            type=AnomalyType.REGRESSION,
                            severity=Severity.MEDIUM,
                            phase=phase_num,
                            phase_name=phase_name,
                            directory="runtime",
                            message=f"Agent {agent_id} has {total} errors",
                            details=errors
                        ))
            except Exception:
                pass  # best-effort: Redis hiccups must not fail the scan
    elif phase_num == 5:  # Agent Bootstrapping - SPECIAL ATTENTION
        # Check tier0-agent
        tier0_config = self.base_path / "agents" / "tier0-agent" / "config" / "agent.json"
        if not tier0_config.exists():
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.MISSING_ARTIFACT,
                severity=Severity.HIGH,
                phase=phase_num,
                phase_name=phase_name,
                directory="agents/tier0-agent",
                message="Tier 0 agent config missing"
            ))
        # Check orchestrator
        model_controller = self.base_path / "orchestrator" / "model_controller.py"
        if not model_controller.exists():
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.MISSING_ARTIFACT,
                severity=Severity.MEDIUM,
                phase=phase_num,
                phase_name=phase_name,
                directory="orchestrator",
                message="Model controller missing"
            ))
    elif phase_num == 8:  # Production Hardening
        # Check if health_manager exists
        health_manager = self.base_path / "runtime" / "health_manager.py"
        if not health_manager.exists():
            anomalies.append(Anomaly(
                id="",
                type=AnomalyType.MISSING_ARTIFACT,
                severity=Severity.HIGH,
                phase=phase_num,
                phase_name=phase_name,
                directory="runtime",
                message="health_manager.py not implemented - Phase 8 blocked"
            ))
    return anomalies
def _persist_anomalies(self, anomalies: list[Anomaly]):
"""Persist anomalies to storage (Redis + SQLite)"""
# Persist to SQLite
conn = sqlite3.connect(self.bug_db)
cursor = conn.cursor()
for anomaly in anomalies:
# Convert enum values to strings for storage
type_val = anomaly.type.value if hasattr(anomaly.type, 'value') else anomaly.type
sev_val = anomaly.severity.value if hasattr(anomaly.severity, 'value') else anomaly.severity
status_val = anomaly.status.value if hasattr(anomaly.status, 'value') else anomaly.status
cursor.execute("""
INSERT OR REPLACE INTO bugs
(id, type, severity, status, phase, phase_name, directory, message,
details, stack_trace, checkpoint_id, status_file, detected_at,
updated_at, resolution_notes, assigned_to)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
anomaly.id,
type_val,
sev_val,
status_val,
anomaly.phase,
anomaly.phase_name,
anomaly.directory,
anomaly.message,
json.dumps(anomaly.details) if anomaly.details else None,
anomaly.stack_trace,
anomaly.checkpoint_id,
anomaly.status_file,
anomaly.detected_at,
anomaly.updated_at,
anomaly.resolution_notes,
anomaly.assigned_to
))
conn.commit()
conn.close()
# Also persist to Redis for real-time access
if not self._redis:
return
for anomaly in anomalies:
# Store in Redis list
self._redis.lpush(
"oversight:anomalies",
json.dumps(asdict(anomaly))
)
# Keep only last 1000
self._redis.ltrim("oversight:anomalies", 0, 999)
# Index by severity
sev_val = anomaly.severity.value if hasattr(anomaly.severity, 'value') else anomaly.severity
self._redis.sadd(f"oversight:anomalies:{sev_val}", anomaly.id)
# Index by phase
self._redis.sadd(f"oversight:anomalies:phase:{anomaly.phase}", anomaly.id)
# Index by status
status_val = anomaly.status.value if hasattr(anomaly.status, 'value') else anomaly.status
self._redis.sadd(f"oversight:anomalies:status:{status_val}", anomaly.id)
def get_anomalies(
    self,
    severity: Optional[Severity] = None,
    phase: Optional[int] = None,
    status: Optional[BugStatus] = None,
    limit: int = 50
) -> list[Anomaly]:
    """Retrieve anomalies from SQLite, newest first, with optional filters.

    Args:
        severity: keep only this severity (enum member or plain string).
        phase: keep only this phase number; 0 is a valid phase.
        status: keep only this bug status (enum member or plain string).
        limit: maximum number of rows returned.

    Rows that fail to deserialize (unknown enum value, corrupt JSON) are
    skipped rather than aborting the whole query.
    """
    conn = sqlite3.connect(self.bug_db)
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        query = "SELECT * FROM bugs WHERE 1=1"
        params = []
        if severity:
            sev_val = severity.value if hasattr(severity, 'value') else severity
            query += " AND severity = ?"
            params.append(sev_val)
        # FIX: was `if phase:`, which silently ignored phase=0 — the default
        # phase assigned by log_bug — making those bugs unfilterable.
        if phase is not None:
            query += " AND phase = ?"
            params.append(phase)
        if status:
            status_val = status.value if hasattr(status, 'value') else status
            query += " AND status = ?"
            params.append(status_val)
        query += " ORDER BY detected_at DESC LIMIT ?"
        params.append(limit)
        cursor.execute(query, params)
        rows = cursor.fetchall()
    finally:
        # FIX: the original leaked the connection when the query raised.
        conn.close()
    anomalies = []
    for row in rows:
        try:
            anomaly = Anomaly(
                id=row['id'],
                type=AnomalyType(row['type']),
                severity=Severity(row['severity']),
                status=BugStatus(row['status']),
                phase=row['phase'],
                phase_name=row['phase_name'],
                directory=row['directory'],
                message=row['message'],
                details=json.loads(row['details']) if row['details'] else {},
                stack_trace=row['stack_trace'],
                checkpoint_id=row['checkpoint_id'],
                status_file=row['status_file'],
                detected_at=row['detected_at'],
                updated_at=row['updated_at'],
                resolution_notes=row['resolution_notes'],
                assigned_to=row['assigned_to'],
                resolved=row['status'] == 'resolved'  # legacy flag derived from status
            )
            anomalies.append(anomaly)
        except Exception:
            continue  # skip undeserializable rows
    return anomalies
def update_bug_status(
    self,
    bug_id: str,
    new_status: BugStatus,
    notes: Optional[str] = None,
    assigned_to: Optional[str] = None
) -> bool:
    """Update a bug's status, optionally setting notes and an assignee.

    Args:
        bug_id: primary key of the bug row.
        new_status: target status (enum member or plain string).
        notes: if given, stored as resolution_notes.
        assigned_to: if given, stored as assigned_to.

    Returns True when a row was actually updated. resolved_at is stamped
    when moving to RESOLVED and cleared when a bug is reopened. On success
    the Redis status index sets and per-bug hash are kept in sync.
    """
    now = self._now()
    status_val = new_status.value if hasattr(new_status, 'value') else new_status
    # Build update query incrementally from the provided fields.
    updates = ["status = ?", "updated_at = ?"]
    params = [status_val, now]
    if notes is not None:
        updates.append("resolution_notes = ?")
        params.append(notes)
    if assigned_to is not None:
        updates.append("assigned_to = ?")
        params.append(assigned_to)
    if status_val == BugStatus.RESOLVED.value:
        updates.append("resolved_at = ?")
        params.append(now)
    else:
        # FIX: clear the stale resolution timestamp when a bug is reopened;
        # previously a reopened bug kept its old resolved_at forever.
        updates.append("resolved_at = NULL")
    params.append(bug_id)
    conn = sqlite3.connect(self.bug_db)
    try:
        cursor = conn.cursor()
        cursor.execute(f"""
            UPDATE bugs SET {', '.join(updates)} WHERE id = ?
        """, params)
        updated = cursor.rowcount > 0
        conn.commit()
    finally:
        # FIX: the original leaked the connection when execute/commit raised.
        conn.close()
    # Update Redis index if available
    if self._redis and updated:
        # Remove from old status sets, add to new
        for s in BugStatus:
            self._redis.srem(f"oversight:anomalies:status:{s.value}", bug_id)
        self._redis.sadd(f"oversight:anomalies:status:{status_val}", bug_id)
        self._redis.hset(f"oversight:anomaly:{bug_id}", mapping={
            "status": status_val,
            "updated_at": now,
            "resolution_notes": notes or "",
            "assigned_to": assigned_to or ""
        })
    return updated
def acknowledge_anomaly(self, anomaly_id: str, notes: str = "") -> bool:
    """Backwards-compatible alias: mark *anomaly_id* as resolved with *notes*."""
    return self.update_bug_status(anomaly_id, BugStatus.RESOLVED, notes=notes)
def get_bug(self, bug_id: str) -> Optional[Anomaly]:
    """Get a single bug by ID.

    Returns None when the id is unknown. The legacy `resolved` flag is
    derived from the stored status column rather than persisted directly.
    """
    conn = sqlite3.connect(self.bug_db)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM bugs WHERE id = ?", (bug_id,))
    row = cursor.fetchone()
    conn.close()
    if not row:
        return None
    return Anomaly(
        id=row['id'],
        type=AnomalyType(row['type']),
        severity=Severity(row['severity']),
        status=BugStatus(row['status']),
        phase=row['phase'],
        phase_name=row['phase_name'],
        directory=row['directory'],
        message=row['message'],
        details=json.loads(row['details']) if row['details'] else {},
        stack_trace=row['stack_trace'],
        checkpoint_id=row['checkpoint_id'],
        status_file=row['status_file'],
        detected_at=row['detected_at'],
        updated_at=row['updated_at'],
        resolution_notes=row['resolution_notes'],
        assigned_to=row['assigned_to'],
        resolved=row['status'] == 'resolved'  # legacy flag derived from status
    )
def log_bug(
    self,
    message: str,
    severity: Severity = Severity.MEDIUM,
    bug_type: AnomalyType = AnomalyType.UNHANDLED_ERROR,
    phase: int = 0,
    directory: str = "unknown",
    details: Optional[dict] = None,
    stack_trace: Optional[str] = None
) -> Anomaly:
    """Manually log a bug (for API/CLI use).

    Creates an OPEN Anomaly (id and detected_at auto-filled by
    Anomaly.__post_init__), persists it to SQLite/Redis, appends it to the
    in-memory list, and returns it.
    """
    anomaly = Anomaly(
        id="",
        type=bug_type,
        severity=severity,
        status=BugStatus.OPEN,
        phase=phase,
        phase_name=self.PHASES.get(phase, f"Phase {phase}"),
        directory=directory,
        message=message,
        details=details or {},
        stack_trace=stack_trace
    )
    self._persist_anomalies([anomaly])
    self.anomalies.append(anomaly)
    return anomaly
def get_summary(self) -> dict:
    """Get summary of watcher state and anomalies.

    Aggregates up to the 1000 most recent bugs from SQLite into counts by
    severity, phase, type, and status; the top-level open/in_progress/
    resolved keys are convenience views of by_status for the UI cards.
    """
    anomalies = self.get_anomalies(limit=1000)
    by_severity = {s.value: 0 for s in Severity}
    by_phase = {p: 0 for p in self.PHASES}
    by_type = {t.value: 0 for t in AnomalyType}
    by_status = {s.value: 0 for s in BugStatus}
    for a in anomalies:
        # Handle both enum and string values
        sev_val = a.severity.value if hasattr(a.severity, 'value') else a.severity
        type_val = a.type.value if hasattr(a.type, 'value') else a.type
        status_val = a.status.value if hasattr(a.status, 'value') else a.status
        # .get keeps unknown/legacy values from raising KeyError
        by_severity[sev_val] = by_severity.get(sev_val, 0) + 1
        by_phase[a.phase] = by_phase.get(a.phase, 0) + 1
        by_type[type_val] = by_type.get(type_val, 0) + 1
        by_status[status_val] = by_status.get(status_val, 0) + 1
    return {
        "state": asdict(self.state),
        "total_anomalies": len(anomalies),
        "open": by_status.get("open", 0),
        "in_progress": by_status.get("in_progress", 0),
        "resolved": by_status.get("resolved", 0),
        "by_severity": by_severity,
        "by_phase": by_phase,
        "by_type": by_type,
        "by_status": by_status,
        "phases": self.PHASES
    }
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Bug Window Watcher")
parser.add_argument("command", choices=["scan", "status", "list", "update", "log", "get"], help="Command to run")
parser.add_argument("--phase", type=int, help="Specific phase to scan")
parser.add_argument("--severity", choices=["critical", "high", "medium", "low", "info"])
parser.add_argument("--bug-status", dest="bug_status", choices=["open", "in_progress", "resolved"], help="Filter by bug status")
parser.add_argument("--json", action="store_true", help="Output as JSON")
# For update command
parser.add_argument("--id", help="Bug ID to update or get")
parser.add_argument("--set-status", dest="set_status", choices=["open", "in_progress", "resolved"], help="New status to set")
parser.add_argument("--notes", help="Resolution or status notes")
parser.add_argument("--assign", help="Assign bug to person/team")
# For log command
parser.add_argument("--message", "-m", help="Bug message (for log command)")
parser.add_argument("--directory", "-d", default="unknown", help="Directory (for log command)")
parser.add_argument("--type", dest="bug_type", choices=[t.value for t in AnomalyType], default="unhandled_error")
args = parser.parse_args()
watcher = BugWindowWatcher()
watcher.start()
if args.command == "scan":
if args.phase:
anomalies = watcher.scan_phase(args.phase)
else:
anomalies = watcher.scan_all_phases()
if args.json:
print(json.dumps([asdict(a) for a in anomalies], indent=2))
else:
print(f"\n{'='*60}")
print(f"BUG WINDOW WATCHER - Scan Results")
print(f"{'='*60}")
print(f"Anomalies found: {len(anomalies)}")
print()
for a in anomalies:
sev_val = a.severity.value if hasattr(a.severity, 'value') else a.severity
status_val = a.status.value if hasattr(a.status, 'value') else a.status
icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🔵", "info": ""}.get(sev_val, "")
status_icon = {"open": "📋", "in_progress": "🔧", "resolved": ""}.get(status_val, "")
print(f"{icon} [{sev_val.upper()}] {status_icon} {status_val.upper()} | Phase {a.phase}: {a.message}")
print(f" ID: {a.id}")
print(f" Directory: {a.directory}")
if a.status_file:
print(f" Status: {a.status_file}")
print()
elif args.command == "status":
summary = watcher.get_summary()
if args.json:
print(json.dumps(summary, indent=2))
else:
print(f"\n{'='*60}")
print(f"BUG WINDOW WATCHER - Status")
print(f"{'='*60}")
print(f"Active: {summary['state']['active']}")
print(f"Total Bugs: {summary['total_anomalies']}")
print()
print("By Status:")
print(f" 📋 Open: {summary['open']}")
print(f" 🔧 In Progress: {summary['in_progress']}")
print(f" ✅ Resolved: {summary['resolved']}")
print()
print("By Severity:")
for sev, count in summary['by_severity'].items():
if count > 0:
icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🔵", "info": ""}.get(sev, "")
print(f" {icon} {sev}: {count}")
elif args.command == "list":
severity = Severity(args.severity) if args.severity else None
status = BugStatus(args.bug_status) if args.bug_status else None
anomalies = watcher.get_anomalies(severity=severity, phase=args.phase, status=status)
if args.json:
print(json.dumps([asdict(a) for a in anomalies], indent=2))
else:
if not anomalies:
print("No bugs found matching criteria.")
else:
print(f"\n{'='*70}")
print(f"{'ID':<20} {'Status':<12} {'Severity':<10} {'Message'}")
print(f"{'='*70}")
for a in anomalies:
sev_val = a.severity.value if hasattr(a.severity, 'value') else a.severity
status_val = a.status.value if hasattr(a.status, 'value') else a.status
msg = a.message[:40] + "..." if len(a.message) > 40 else a.message
print(f"{a.id:<20} {status_val:<12} {sev_val:<10} {msg}")
elif args.command == "update":
if not args.id:
print("Error: --id is required for update command")
exit(1)
if not args.set_status:
print("Error: --set-status is required for update command")
exit(1)
new_status = BugStatus(args.set_status)
success = watcher.update_bug_status(
args.id,
new_status,
notes=args.notes,
assigned_to=args.assign
)
if success:
bug = watcher.get_bug(args.id)
if args.json:
print(json.dumps(asdict(bug), indent=2))
else:
print(f"✅ Bug {args.id} updated to {args.set_status}")
if args.notes:
print(f" Notes: {args.notes}")
if args.assign:
print(f" Assigned to: {args.assign}")
else:
print(f"❌ Failed to update bug {args.id} - not found")
exit(1)
elif args.command == "get":
if not args.id:
print("Error: --id is required for get command")
exit(1)
bug = watcher.get_bug(args.id)
if bug:
if args.json:
print(json.dumps(asdict(bug), indent=2))
else:
sev_val = bug.severity.value if hasattr(bug.severity, 'value') else bug.severity
status_val = bug.status.value if hasattr(bug.status, 'value') else bug.status
type_val = bug.type.value if hasattr(bug.type, 'value') else bug.type
print(f"\n{'='*60}")
print(f"Bug: {bug.id}")
print(f"{'='*60}")
print(f"Status: {status_val}")
print(f"Severity: {sev_val}")
print(f"Type: {type_val}")
print(f"Phase: {bug.phase} - {bug.phase_name}")
print(f"Directory: {bug.directory}")
print(f"Message: {bug.message}")
print(f"Detected: {bug.detected_at}")
if bug.updated_at:
print(f"Updated: {bug.updated_at}")
if bug.assigned_to:
print(f"Assigned to: {bug.assigned_to}")
if bug.resolution_notes:
print(f"Notes: {bug.resolution_notes}")
else:
print(f"❌ Bug {args.id} not found")
exit(1)
elif args.command == "log":
if not args.message:
print("Error: --message/-m is required for log command")
exit(1)
severity = Severity(args.severity) if args.severity else Severity.MEDIUM
bug_type = AnomalyType(args.bug_type)
phase = args.phase or 0
bug = watcher.log_bug(
message=args.message,
severity=severity,
bug_type=bug_type,
phase=phase,
directory=args.directory
)
if args.json:
print(json.dumps(asdict(bug), indent=2))
else:
print(f"✅ Bug logged: {bug.id}")
print(f" Severity: {severity.value}")
print(f" Status: open")
print(f" Message: {args.message}")