profit ac01fffd9a checkpoint: matrix-agent-validated (2026-04-25)
Architectural snapshot of the lakehouse codebase at the point where the
full matrix-driven agent loop with Mem0 versioning + deletion was
validated end-to-end.

WHAT THIS REPO IS
A clean single-commit snapshot of the lakehouse code. Heavy test data
(.parquet datasets, vector indexes) excluded — see REPLICATION.md for
regen path. Full lakehouse history at git.agentview.dev/profit/lakehouse.

WHAT WAS PROVEN
- Vector retrieval across multi-corpora matrix (chicago_permits + entity
  briefs + sec_tickers + distilled procedural + llm_team runs)
- Observer hand-review (cloud + heuristic fallback) gating each candidate
- Local-model agent loop (qwen3.5:latest) with tool use + scratchpad
- Playbook seal on success → next-iter retrieval surfaces it as preamble
- Mem0 versioning + deletion in pathway_memory:
    * UPSERT: ADD on new workflow, UPDATE bumps replay_count on identical
    * REVISE: chains versions, parent.superseded_at + superseded_by stamped
    * RETIRE: marks specific trace retired with reason, excluded from retrieval
    * HISTORY: walks chain root→tip, cycle-safe

KEY DIRECTORIES
- crates/vectord/src/pathway_memory.rs — Mem0 ops live here
- crates/vectord/src/playbook_memory.rs — original Mem0 reference
- tests/agent_test/ — local-model agent harness + PRD + session archives
- scripts/dump_raw_corpus.sh — MinIO bucket dump (raw test corpus)
- scripts/vectorize_raw_corpus.ts — corpus → vector indexes
- scripts/analyze_chicago_contracts.ts — real inference pipeline
- scripts/seal_agent_playbook.ts — Mem0 upsert from agent traces

Replication: see REPLICATION.md for Debian 13 clean install + cloud-only
adaptation (no local Ollama).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-25 19:43:27 -05:00

884 lines
46 KiB
JavaScript
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// Visual Control Plane — client (vanilla JS, D3 from CDN)
// Design note: KB data flows from local jsonl files we control, but we
// still use DOM methods (createElement/textContent) for every
// data-derived node to satisfy static analysis and keep a clean XSS
// boundary if the UI ever gets exposed.
// Poll cadence (ms) for all /data/* endpoints.
const POLL_MS = 3000;

// Single mutable UI state shared by every renderer.
const state = {
  view: "map",       // active tab: map | trace | trajectory | metrics | kb | console
  overlay: "status", // map overlay: status | activity | confidence | gradient | verdict
  selected: null,    // { type: "node" | "file", id } or null
  services: null,    // latest /data/services payload
  reviews: [],       // scrum review records (/data/reviews)
  metrics: [],
  overrides: [],
  trust: [],
  findings: [],
};
// ───── view switcher ─────
// Tab buttons: exactly one carries .on; a click swaps the visible
// .view panel to match and re-renders the active view.
for (const btn of document.querySelectorAll("#views button")) {
  btn.addEventListener("click", () => {
    for (const other of document.querySelectorAll("#views button")) other.classList.remove("on");
    btn.classList.add("on");
    state.view = btn.dataset.view;
    for (const panel of document.querySelectorAll(".view")) panel.classList.remove("on");
    document.getElementById(`view-${state.view}`).classList.add("on");
    renderView();
  });
}
// Overlay buttons only recolor the map in place — no full re-render.
for (const btn of document.querySelectorAll("#overlay-controls button")) {
  btn.addEventListener("click", () => {
    for (const other of document.querySelectorAll("#overlay-controls button")) other.classList.remove("on");
    btn.classList.add("on");
    state.overlay = btn.dataset.ov;
    if (state.services) drawMap(state.services);
  });
}
// ───── helpers ─────
/**
 * Create a DOM element. All data-derived text flows through textContent,
 * never innerHTML — preserves the XSS boundary noted in the file header.
 * @param {string} tag - element tag name
 * @param {{className?: string, text?: *, data?: Object, attrs?: Object, style?: Object}} [opts]
 * @param {...(Node|string|null|undefined)} kids - appended in order; nullish kids skipped
 * @returns {HTMLElement}
 */
function el(tag, opts = {}, ...kids) {
  const n = document.createElement(tag);
  if (opts.className) n.className = opts.className;
  if (opts.text != null) n.textContent = String(opts.text);
  // Object.entries (not for...in) so inherited/prototype keys are never
  // copied onto the element.
  if (opts.data) for (const [k, v] of Object.entries(opts.data)) n.dataset[k] = v;
  if (opts.attrs) for (const [k, v] of Object.entries(opts.attrs)) n.setAttribute(k, v);
  if (opts.style) for (const [k, v] of Object.entries(opts.style)) n.style[k] = v;
  for (const kid of kids) if (kid != null) n.append(kid);
  return n;
}
// Remove every child of `node` (called before re-rendering a panel).
function clear(node) {
  while (node.firstChild) node.firstChild.remove();
}
// One key/value line for the context panel; `valClass` optionally adds
// an extra CSS class to the value span.
function row(k, v, valClass) {
  const valueClass = "v" + (valClass ? " " + valClass : "");
  const line = el("div", { className: "ctx-row" });
  line.append(
    el("span", { className: "k", text: k }),
    el("span", { className: valueClass, text: String(v ?? "-") }),
  );
  return line;
}
/**
 * Truncate any value to an 80-char display string.
 * @param {*} v
 * @returns {string} "-" for null/undefined; JSON (or a fallback type tag)
 *   for objects; String(v) otherwise — always sliced to 80 chars.
 */
function short(v) {
  if (v == null) return "-";
  if (typeof v === "object") {
    // JSON.stringify throws on circular structures and BigInt members —
    // fall back to a harmless type tag instead of breaking the render.
    try {
      return JSON.stringify(v).slice(0, 80);
    } catch {
      return Object.prototype.toString.call(v).slice(0, 80);
    }
  }
  return String(v).slice(0, 80);
}
// ───── polling ─────
// One polling tick: fetch every /data/* feed in parallel, normalize
// array payloads, refresh the header heartbeat chips, then re-render
// the active view + context panel + log stream. Errors are logged,
// never thrown — the next tick simply retries.
async function poll() {
  try {
    const getJson = (url) => fetch(url).then((r) => r.json());
    const [svc, rev, met, ov, tr, fin] = await Promise.all([
      getJson("/data/services"),
      getJson("/data/reviews?tail=80"),
      getJson("/data/metrics"),
      getJson("/data/overrides"),
      getJson("/data/trust"),
      getJson("/data/findings"),
    ]);
    const asArray = (x) => (Array.isArray(x) ? x : []);
    state.services = svc;
    state.reviews = asArray(rev);
    state.metrics = asArray(met);
    state.overrides = asArray(ov);
    state.trust = asArray(tr);
    state.findings = asArray(fin);
    document.getElementById("build-ts").textContent = new Date(svc.ts).toLocaleTimeString();
    for (const n of svc.nodes) {
      const chip = document.querySelector(`.hbchip[data-svc="${n.id}"]`);
      if (chip) chip.setAttribute("data-status", n.status);
    }
    renderView();
    renderContext();
    pollStream();
  } catch (e) {
    console.error("poll error", e);
  }
}
// Tail the scrum log: classify each of the last 30 lines for styling
// and pin the scroll to the bottom. Best-effort — any fetch/parse
// failure is silently ignored (the next poll() tick retries).
async function pollStream() {
  try {
    const j = await fetch("/data/scrum_log").then((r) => r.json());
    if (!j.lines) return;
    document.getElementById("stream-file").textContent = j.file ? j.file.split("/").pop() : "—";
    const body = document.getElementById("stream-body");
    clear(body);
    const classify = (line) => {
      if (/✓ ACCEPTED/.test(line)) return "ok";
      if (/✗ thin/.test(line)) return "thin";
      if (/error|failed|FAIL/i.test(line)) return "err";
      if (/^\[scrum\] file:/.test(line)) return "head";
      return "info";
    };
    for (const line of j.lines.slice(-30)) {
      body.append(el("div", { className: "sline " + classify(line), text: line }));
    }
    body.scrollTop = body.scrollHeight;
  } catch {}
}
// Render whichever view tab is active. No-op until the first
// /data/services payload has arrived.
function renderView() {
  if (!state.services) return;
  const renderers = {
    map: () => drawMap(state.services),
    trace: drawTrace,
    trajectory: drawTrajectory,
    metrics: drawMetrics,
    kb: drawKB,
    console: drawConsole,
  };
  renderers[state.view]?.();
}
// ───── MAP ─────
// Fixed node layout in unit coordinates; drawMap scales these to the
// live SVG bounding box on every draw.
const NODES_STATIC = [
  { id: "gateway", x: 0.5, y: 0.15 },
  { id: "sidecar", x: 0.2, y: 0.3 },
  { id: "observer", x: 0.8, y: 0.3 },
  { id: "mcp", x: 0.85, y: 0.1 },
  { id: "context7", x: 0.15, y: 0.1 },
  { id: "journal", x: 0.35, y: 0.55 },
  { id: "vectord", x: 0.5, y: 0.5 },
  { id: "playbook", x: 0.65, y: 0.55 },
  { id: "agent", x: 0.5, y: 0.75 },
  { id: "usage", x: 0.2, y: 0.75 },
];

// Undirected edges, drawn beneath the nodes. The gateway is the hub;
// the remaining edges are direct service-to-service links.
const EDGES = [
  ["gateway", "sidecar"],
  ["gateway", "observer"],
  ["gateway", "mcp"],
  ["gateway", "context7"],
  ["gateway", "journal"],
  ["gateway", "vectord"],
  ["gateway", "playbook"],
  ["gateway", "agent"],
  ["gateway", "usage"],
  ["vectord", "playbook"],
  ["agent", "vectord"],
  ["observer", "playbook"],
  ["sidecar", "vectord"],
];
// Draw the service map into the #map SVG: edges first, then node
// groups (circle + native <title> tooltip + label + stat subtitle).
// The whole SVG is cleared and redrawn on every call; positions come
// from NODES_STATIC unit coordinates scaled to the live bounding box.
function drawMap(svc) {
const svg = d3.select("#map");
const box = svg.node().getBoundingClientRect();
const W = box.width, H = box.height;
// Full redraw — no D3 enter/update/exit reconciliation.
svg.selectAll("*").remove();
// id -> node/subsystem record, used by the radius/color/subtitle fns.
const statusMap = {};
[...svc.nodes, ...svc.subsystems].forEach(n => statusMap[n.id] = n);
// Edges beneath the nodes; "active" highlights recent gateway traffic
// (see overlayEdgeActive).
svg.selectAll(".edge").data(EDGES).enter().append("line")
.attr("class", d => "edge" + (overlayEdgeActive(d) ? " active" : ""))
.attr("x1", d => nodePos(d[0]).x * W).attr("y1", d => nodePos(d[0]).y * H)
.attr("x2", d => nodePos(d[1]).x * W).attr("y2", d => nodePos(d[1]).y * H);
// Clicking a node selects it and redraws so the selection ring shows.
const g = svg.selectAll(".node").data(NODES_STATIC).enter().append("g")
.attr("class", "node")
.attr("transform", d => `translate(${d.x * W}, ${d.y * H})`)
.on("click", (_ev, d) => { state.selected = { type:"node", id:d.id }; renderContext(); drawMap(svc); });
// Radius + fill react to the active overlay (nodeRadius / nodeColor).
g.append("circle")
.attr("class", d => "node-circle" + (state.selected?.type==="node" && state.selected.id===d.id ? " node-selected" : ""))
.attr("r", d => nodeRadius(d, statusMap))
.attr("fill", d => nodeColor(d, statusMap));
// SVG tooltip — hover a node, browser shows a native tooltip with
// what this node DOES, not just its name.
g.append("title").text(d => nodeTooltip(d.id));
g.append("text").attr("class","node-label").attr("y", -30).text(d => nodeLabel(d.id));
g.append("text").attr("class","node-sub").attr("y", 40).text(d => nodeSub(d, statusMap));
}
// Unit-space position for a node id; unknown ids pin to the origin.
function nodePos(id) {
  const hit = NODES_STATIC.find((n) => n.id === id);
  return hit ?? { x: 0, y: 0 };
}
// Short uppercase display label per map node; unknown ids pass through.
function nodeLabel(id) {
  const labels = {
    gateway: "GATEWAY",
    sidecar: "SIDECAR",
    observer: "OBSERVER",
    mcp: "MCP",
    context7: "CTX7",
    journal: "JOURNAL",
    vectord: "VECTORD",
    playbook: "PLAYBOOK",
    agent: "AUTOTUNE",
    usage: "USAGE",
  };
  return labels[id] ?? id;
}
// Node radius. In the "activity" overlay the radius grows with the
// log2 of each service's headline counter (capped at +20px); every
// other overlay uses a fixed 18px.
function nodeRadius(d, m) {
  const stats = m[d.id]?.stats;
  if (state.overlay === "activity" && stats) {
    const grown = (count, mult = 2) => 14 + Math.min(20, Math.log2(count + 1) * mult);
    if (d.id === "journal" && stats.total_events_created != null) return grown(stats.total_events_created);
    if (d.id === "vectord" && stats.count != null) return grown(stats.count);
    if (d.id === "playbook" && stats.total != null) return grown(stats.total, 1);
    if (d.id === "observer" && stats.total != null) return grown(stats.total, 1);
    if (d.id === "usage" && stats.requests != null) return grown(stats.requests);
  }
  return 18;
}
// Node fill color for the active overlay. Gray (#525c6f) always means
// "unknown / no data for this node yet".
function nodeColor(d, m) {
  const GRAY = "#525c6f";
  const n = m[d.id];
  const ov = state.overlay;
  if (ov === "status" || ov === "activity") {
    const st = n?.status ?? (n?.stats ? "healthy" : "unknown");
    const byStatus = { healthy: "#3eed86", degraded: "#ffbf3c", down: "#ff4d6e", unknown: GRAY };
    return byStatus[st] ?? GRAY;
  }
  if (ov === "confidence") {
    const c = recentAvgConfidence(d.id);
    if (c == null) return GRAY;
    if (c >= 88) return "#3eed86";
    if (c >= 70) return "#55c5ff";
    if (c >= 50) return "#ffbf3c";
    return "#ff4d6e";
  }
  if (ov === "gradient") {
    const tier = recentGradientTier(d.id);
    if (!tier) return GRAY;
    const byTier = { auto: "#3eed86", dry_run: "#55c5ff", simulation: "#ffbf3c", block: "#ff4d6e" };
    return byTier[tier] ?? GRAY;
  }
  if (ov === "verdict") {
    const byVerdict = { pass: "#3eed86", needs_patch: "#ff9f43", fail: "#ff4d6e" };
    return byVerdict[recentVerdict(d.id)] ?? GRAY;
  }
  return "#55c5ff";
}
// One-line stat subtitle under each map node ("…" until data arrives;
// falls back to the raw status string for nodes with no stat handler).
function nodeSub(d, m) {
  const n = m[d.id];
  if (!n) return "…";
  const s = n.stats;
  if (d.id === "journal" && s) return `${s.total_events_created ?? 0} events · ${s.persisted_files ?? 0} parquet`;
  if (d.id === "usage" && s) return `${s.requests ?? 0} requests · ${Math.round((s.total_tokens ?? 0)/1000)}k tokens`;
  if (d.id === "vectord" && typeof s === "object" && s) return `${s.count ?? 0} indexes`;
  if (d.id === "playbook" && s) return `${s.active ?? 0} active · ${s.retired ?? 0} retired`;
  if (d.id === "agent" && s) return `${s.trials_run ?? 0} trials · ${s.promotions ?? 0} promotions`;
  if (d.id === "observer" && s) return `${s.total ?? 0} observed ops`;
  return String(n.status ?? "");
}
// Native SVG <title> tooltip text: what each node DOES, not just its
// name. Unknown ids fall back to the id itself.
function nodeTooltip(id) {
  const tips = {
    gateway: "GATEWAY — Rust/Axum HTTP on :3100. Every external call enters here: /v1/chat, /ingest, /query, /tools, /journal, /vectors. Also hosts gRPC on :3101.",
    sidecar: "SIDECAR — Python FastAPI on :3200. Adapter from Rust to local Ollama (:11434). Handles /embed /generate /rerank. Stateless.",
    observer: "OBSERVER — Bun on :3800. Ring buffer of recent ops across the system. Feeds analyzeErrors + PLAYBOOK_BUILDER loops. Scrum events now land here (P45 fix).",
    mcp: "MCP — Bun on :3700. Model Context Protocol tool gateway. Agent-facing tool endpoints.",
    context7: "CONTEXT7 — Bun on :3900. Doc-drift resolver — checks playbook doc_refs against current docs for version drift (Phase 45 target).",
    journal: "JOURNAL — ADR-012 append-only mutation log inside the gateway. Every ingest/delta-write/tombstone should record here. Currently ~1 real event (P9-001 still mostly unwired).",
    vectord: "VECTORD — Embeddings store + HNSW index + autotune harness. The 'indexes' count = named vector indexes live right now (one per source × model_version).",
    playbook: "PLAYBOOK — Meta-index. Each entry = a successful past pattern + geo/role + 768d embedding. Active entries boost future vector-search results (Phase 19).",
    agent: "AUTOTUNE — Background agent that continuously proposes HNSW config trials, picks Pareto winners above min_recall, promotes, and rolls back. Self-tuning vector index.",
    usage: "USAGE — /v1/chat token counters. Tracks requests, prompt/completion tokens, per-provider breakdown. Grows with scrum + audit traffic.",
  };
  return tips[id] ?? id;
}
// An edge pulses "active" when the newest review is under 60s old and
// the edge joins the gateway to observer or vectord.
function overlayEdgeActive(edge) {
  const latest = state.reviews.at(-1);
  if (!latest?.reviewed_at) return false;
  const ageMs = Date.now() - new Date(latest.reviewed_at).getTime();
  if (ageMs > 60000) return false;
  const touches = (id) => edge.includes(id);
  return touches("gateway") && (touches("observer") || touches("vectord"));
}
// Heuristic: does a review record's file path belong to service `id`?
// Path matching is case-insensitive; ids without a rule never match.
function matchesNode(r, id) {
  if (!r?.file) return false;
  const f = r.file.toLowerCase();
  switch (id) {
    case "gateway": return f.includes("/gateway/");
    case "vectord": return f.includes("/vectord");
    case "journal": return f.includes("/journald");
    case "playbook": return f.includes("playbook_memory");
    case "sidecar": return f.includes("sidecar");
    case "agent": return f.includes("agent.rs") || f.includes("autotune");
    default: return false;
  }
}
// Mean confidence_avg across reviews matched to this node, or null
// when no matched review carries a confidence value.
function recentAvgConfidence(id) {
  const vals = state.reviews
    .filter((r) => matchesNode(r, id))
    .map((r) => r.confidence_avg)
    .filter((v) => v != null);
  if (vals.length === 0) return null;
  return vals.reduce((sum, v) => sum + v, 0) / vals.length;
}
// Most recent non-empty gradient_tier among this node's matched
// reviews (null when none carry one).
function recentGradientTier(id) {
  const tiers = state.reviews
    .filter((r) => matchesNode(r, id))
    .map((r) => r.gradient_tier)
    .filter(Boolean);
  return tiers.at(-1) ?? null;
}
// Most recent non-empty verdict among this node's matched reviews
// (null when none carry one).
function recentVerdict(id) {
  const verdicts = state.reviews
    .filter((r) => matchesNode(r, id))
    .map((r) => r.verdict)
    .filter(Boolean);
  return verdicts.at(-1) ?? null;
}
// ───── CONTEXT ─────
// Paint the right-hand context panel for the current selection (node
// or file). With nothing selected, show a hint plus system totals.
function renderContext() {
  const target = document.getElementById("ctx-target");
  const body = document.getElementById("ctx-body");
  clear(body);
  const sel = state.selected;
  if (!sel) {
    target.textContent = "no selection";
    body.append(el("div", { className: "ctx-hint", text: "Click a node or a file in KB to inspect. Context persists across view switches." }));
    body.append(el("div", { className: "ctx-section-hd", text: "System totals" }));
    appendSummaryKV(body);
    return;
  }
  if (sel.type === "node") renderNodeContext(sel.id, target, body);
  else if (sel.type === "file") renderFileContext(sel.id, target, body);
}
// System-wide headline counters for the empty-selection context panel.
function appendSummaryKV(body) {
  const s = state.services;
  if (!s) {
    body.append(el("div", { className: "ctx-hint", text: "loading…" }));
    return;
  }
  // Stats for a service id across both nodes and subsystems ({} if absent).
  const statsOf = (id) => s.nodes.concat(s.subsystems).find((n) => n.id === id)?.stats ?? {};
  const journal = statsOf("journal");
  const usage = statsOf("usage");
  const playbook = statsOf("playbook");
  const agent = statsOf("agent");
  const observer = statsOf("observer");
  body.append(row("scrum reviews", state.reviews.length));
  body.append(row("journal events", journal.total_events_created ?? 0));
  body.append(row("usage tokens", (usage.total_tokens ?? 0).toLocaleString()));
  body.append(row("playbook active", playbook.active ?? 0));
  body.append(row("autotune trials", agent.trials_run ?? 0));
  body.append(row("observer ops", observer.total ?? 0));
  body.append(row("findings (h/m/l)", `${countFindingsSev("high")}/${countFindingsSev("medium")}/${countFindingsSev("low")}`));
}
// Count findings of a given severity across all findings records.
// (Loop variable renamed from `row`, which shadowed the row() helper.)
function countFindingsSev(sev) {
  let count = 0;
  for (const rec of state.findings) {
    for (const f of rec.findings ?? []) {
      if (f.severity === sev) count++;
    }
  }
  return count;
}
// Context panel for a selected map node: health, stats, and the five
// most recent reviews whose file path matches the node. Clicking a
// review line re-targets the panel at that file.
function renderNodeContext(id, target, body) {
target.textContent = `NODE · ${id.toUpperCase()}`;
const n = [...state.services.nodes, ...state.services.subsystems].find(x => x.id === id);
if (n?.health) {
body.append(el("div", { className: "ctx-section-hd", text: "Health" }));
// Fix 2026-04-24: some /health endpoints return a plain string like
// "lakehouse ok". Don't Object.entries() on strings — that iterates
// characters. Detect primitive vs object explicitly.
if (typeof n.health === "string" || typeof n.health === "number" || typeof n.health === "boolean") {
body.append(row("response", String(n.health).slice(0, 80)));
} else if (typeof n.health === "object" && n.health !== null) {
Object.entries(n.health).slice(0, 8).forEach(([k,v]) => body.append(row(k, short(v))));
}
}
// Stats may also arrive as a raw string — same primitive/object split.
if (n?.stats) {
body.append(el("div", { className: "ctx-section-hd", text: "Stats" }));
if (typeof n.stats === "string") {
body.append(row("raw", String(n.stats).slice(0, 80)));
} else if (typeof n.stats === "object" && n.stats !== null) {
Object.entries(n.stats).slice(0, 10).forEach(([k,v]) => body.append(row(k, short(v))));
}
}
// Last 5 matching reviews, newest first.
const related = state.reviews.filter(r => matchesNode(r, id)).slice(-5).reverse();
if (related.length) {
body.append(el("div", { className: "ctx-section-hd", text: "Recent reviews" }));
related.forEach(r => {
const rr = row(r.file.split("/").pop(), `${r.confidence_avg ?? "-"}% · ${r.alignment_score ?? "?"}/10`);
rr.style.cursor = "pointer";
rr.addEventListener("click", () => { state.selected = { type:"file", id:r.file }; renderContext(); });
body.append(rr);
});
}
// Nothing rendered at all (no health, no stats, no reviews) → hint.
if (!body.firstChild) body.append(el("div", { className: "ctx-hint", text: "no data yet" }));
}
// Context panel for a selected file: tier/verdict/format pills, the
// latest review's key numbers, per-run score history, the suggestions
// preview, and a sync of the log-stream header to this file.
function renderFileContext(fpath, target, body) {
target.textContent = fpath.split("/").slice(-3).join("/");
// Last 6 reviews for this exact path; latest one drives the summary.
const fileReviews = state.reviews.filter(r => r.file === fpath).slice(-6);
if (!fileReviews.length) {
body.append(el("div", { className: "ctx-hint", text: `no reviews for ${fpath}` }));
return;
}
const latest = fileReviews[fileReviews.length - 1];
// Pill row: gradient tier, verdict, output format (each only if present).
const pillRow = el("div", { style: { paddingBottom: "6px" } });
if (latest.gradient_tier) pillRow.append(el("span", { className: `pill tier-${latest.gradient_tier}`, text: latest.gradient_tier }));
if (latest.verdict) pillRow.append(el("span", { className: `pill ver-${latest.verdict}`, text: latest.verdict }));
if (latest.output_format) pillRow.append(el("span", { className: `pill fmt-${latest.output_format}`, text: latest.output_format }));
body.append(pillRow);
// Key/value summary of the latest review.
const rows = [
["file", fpath],
["score", latest.alignment_score != null ? `${latest.alignment_score}/10` : "-"],
["conf avg", latest.confidence_avg != null ? `${latest.confidence_avg}%` : "-"],
["conf min", latest.confidence_min != null ? `${latest.confidence_min}%` : "-"],
["findings", latest.findings_count ?? 0],
["critical", latest.critical_failures_count ?? 0],
["verified", latest.verified_components_count ?? 0],
["missing", latest.missing_components_count ?? 0],
["model", latest.accepted_model ?? "-"],
["attempts", latest.attempts_made ?? 1],
["tree split", latest.tree_split_fired ? "yes" : "no"],
];
rows.forEach(([k,v]) => body.append(row(k, short(v))));
// One line per historical run: time → score/confidence.
body.append(el("div", { className: "ctx-section-hd", text: "Score history" }));
fileReviews.forEach(r => body.append(row(new Date(r.reviewed_at).toLocaleTimeString(), `${r.alignment_score ?? "?"}/10 · ${r.confidence_avg ?? "-"}%`)));
body.append(el("div", { className: "ctx-section-hd", text: "Preview" }));
const pre = el("pre", { text: latest.suggestions_preview ?? "", style: { whiteSpace: "pre-wrap", fontFamily: "var(--mono)", fontSize: "10px", color: "var(--fg-dim)", maxHeight: "200px", overflowY: "auto" } });
body.append(pre);
// Keep the stream header in sync with the inspected file.
document.getElementById("stream-file").textContent = fpath.split("/").pop();
}
// ───── TRACE ─────
// Per-file review timeline: one card per historical run; clicking a
// card shows that run's suggestions preview in the detail pane. Falls
// back to the newest reviewed file when no file is selected.
async function drawTrace() {
const fpath = state.selected?.type === "file" ? state.selected.id : state.reviews[state.reviews.length-1]?.file;
const tl = document.getElementById("trace-timeline");
const detail = document.getElementById("trace-detail");
clear(tl); clear(detail);
document.getElementById("trace-file").textContent = fpath ?? "—";
if (!fpath) { tl.append(el("div", { className: "ctx-hint", text: "no file selected — pick one in KB view" })); return; }
// Full run history for this path from the backend.
const r = await fetch(`/data/file/${encodeURIComponent(fpath)}`).then(r => r.json());
const history = r.history ?? [];
document.getElementById("trace-runs").textContent = `${history.length} runs`;
history.forEach((h, i) => {
// Newest run starts highlighted.
const node = el("div", { className: "trace-node" + (i === history.length - 1 ? " active" : "") });
node.append(el("div", { className: "tn-run", text: h.run_id }));
node.append(el("div", { className: "tn-score", text: h.score != null ? String(h.score) : "?" }));
node.append(el("div", { className: "tn-conf", text: `conf ${h.conf_avg ?? "-"}% · ${h.findings}f` }));
node.append(el("div", { className: "tn-model", text: (h.model ?? "").split("/").pop() }));
// Selecting a run swaps both the highlight and the detail preview.
node.addEventListener("click", () => {
tl.querySelectorAll(".trace-node").forEach(x => x.classList.remove("active"));
node.classList.add("active");
clear(detail);
detail.append(el("pre", { text: h.preview ?? "" }));
});
tl.append(node);
});
// Default detail pane = newest run's preview.
if (history.length) { clear(detail); detail.append(el("pre", { text: history[history.length-1].preview ?? "" })); }
}
// ───── TRAJECTORY — refactor signals + reverse index + per-file delta ─────
// Debounced search box: wait 300 ms after the last keystroke before
// hitting /data/search, so typing doesn't fire a request per character.
let trajectorySearchTimer = null;
document.getElementById("traj-search")?.addEventListener("input", (e) => {
  clearTimeout(trajectorySearchTimer);
  const query = e.target.value.trim();
  trajectorySearchTimer = setTimeout(() => runReverseIndex(query), 300);
});
// Reverse-index search: query /data/search and render one card per
// hit. An empty query restores the default trajectory view. Clicking
// a hit selects the file and jumps to the TRACE view.
async function runReverseIndex(query) {
const body = document.getElementById("traj-body");
if (!query) { drawTrajectory(); return; }
clear(body);
const res = await fetch(`/data/search?q=${encodeURIComponent(query)}`).then(r => r.json());
const hdr = el("div", { className: "traj-section-head", text: `REVERSE INDEX · "${query}" · ${res.hits?.length ?? 0} hits` });
body.append(hdr);
(res.hits ?? []).forEach(h => {
const card = el("div", { className: "traj-hit" });
card.append(el("div", { className: "traj-hit-top" },
el("span", { className: "traj-hit-file", text: h.file }),
el("span", { className: "traj-hit-meta", text: `${h.run_id} · ${(h.model ?? "").split("/").pop()}` })
));
card.append(el("div", { className: "traj-hit-snip", text: h.snippet }));
// Hit paths are repo-relative; prefix the lakehouse root so the file
// id matches the absolute paths used in review records.
card.addEventListener("click", () => {
state.selected = { type: "file", id: `/home/profit/lakehouse/${h.file}` };
renderContext();
document.querySelector('#views button[data-view="trace"]').click();
});
body.append(card);
});
}
// Render the TRAJECTORY view, three sections top to bottom:
//   0. signal classes — per-file iter-to-iter behavior (CONVERGING /
//      LOOPING / ORBITING / PLATEAU / MIXED / NEW) from /data/signal_classes
//   1. refactor signals — files the scrum repeatedly flagged, ranked by
//      phrase hits, from /data/refactor_signals
//   2. score trajectory — score/conf sparklines + first→last delta for
//      the top refactor candidates
// Clicking any card/row selects that file and jumps to the TRACE view.
async function drawTrajectory() {
  const body = document.getElementById("traj-body");
  clear(body);
  const statsEl = document.getElementById("traj-stats");
  clear(statsEl);
  // Shared click handler: select a repo-relative file and jump to trace.
  const jumpToTrace = (file) => {
    state.selected = { type: "file", id: `/home/profit/lakehouse/${file}` };
    renderContext();
    document.querySelector('#views button[data-view="trace"]').click();
  };
  // SECTION 0 — signal classes (CONVERGING/LOOPING/ORBITING/PLATEAU/MIXED)
  try {
    const sc = await fetch("/data/signal_classes").then(r => r.json());
    body.append(el("div", { className: "traj-section-head", text: "SIGNAL CLASSES · iter-to-iter behavior per file" }));
    body.append(el("div", { className: "traj-section-explain", text:
      "Each file compared iter-to-iter: CONVERGING = fix landed (resolved > novel + score↑), " +
      "LOOPING = same findings repeating (deadlock candidate for hyper-focus), " +
      "ORBITING = novel findings every iter (healthy depth-first), " +
      "PLATEAU = score+findings flat (diminishing returns, needs different angle), " +
      "MIXED = partial movement, NEW = only 1 iter so far."
    }));
    // Per-class counts as a chip row.
    const classRow = el("div", { className: "signal-class-row" });
    for (const [cls, n] of Object.entries(sc.counts ?? {})) {
      classRow.append(el("span", { className: `signal-chip signal-${cls.toLowerCase()}`, text: `${cls} ${n}` }));
    }
    body.append(classRow);
    // Per-file cards, most actionable class first.
    const order = { CONVERGING: 0, LOOPING: 1, ORBITING: 2, MIXED: 3, PLATEAU: 4, NEW: 5 };
    const sorted = Object.entries(sc.classes ?? {}).sort(
      (a, b) => (order[a[1].cls] ?? 9) - (order[b[1].cls] ?? 9)
    );
    const grid = el("div", { className: "signal-grid" });
    for (const [file, info] of sorted) {
      const card = el("div", { className: `signal-card signal-${info.cls.toLowerCase()}` });
      card.append(el("div", { className: "signal-card-top" },
        el("span", { className: `signal-chip signal-${info.cls.toLowerCase()}`, text: info.cls }),
        el("span", { className: "signal-card-file", text: file })
      ));
      const cardBody = el("div", { className: "signal-card-body" });
      if (info.prev_score != null || info.last_score != null) {
        // FIX: this line used to render `score ${prev}${last}${delta})` —
        // no separator between the scores and an unbalanced ")" (the "→ ("
        // glyphs were dropped with the file's ambiguous-Unicode strip).
        // Render "score prev → last (Δ)" like the sparkline arrows below.
        const delta = info.delta_score != null
          ? (info.delta_score > 0 ? "+" : "") + info.delta_score.toFixed(1)
          : "?";
        cardBody.append(el("div", { text: `score ${info.prev_score ?? "?"} → ${info.last_score ?? "?"} (${delta})` }));
      }
      if (info.novel?.length) cardBody.append(el("div", { className: "signal-novel", text: `NEW: ${info.novel.join(", ")}` }));
      if (info.resolved?.length) cardBody.append(el("div", { className: "signal-resolved", text: `RESOLVED: ${info.resolved.join(", ")}` }));
      if (info.looping?.length) cardBody.append(el("div", { className: "signal-loop", text: `LOOPING: ${info.looping.join(", ")}` }));
      card.append(cardBody);
      card.addEventListener("click", () => jumpToTrace(file));
      grid.append(card);
    }
    body.append(grid);
  } catch (e) {
    body.append(el("div", { className: "ctx-hint", text: `signal classes error: ${e}` }));
  }
  // SECTION 1 — refactor signals
  const sig = await fetch("/data/refactor_signals").then(r => r.json());
  const sigs = sig.signals ?? [];
  const totalHits = sigs.reduce((a, s) => a + s.hits, 0);
  statsEl.textContent = `${sig.scanned ?? 0} files scanned · ${sigs.length} with refactor hints · ${totalHits} phrase hits total`;
  body.append(el("div", { className: "traj-section-head", text: "REFACTOR SIGNALS · files the scrum repeatedly flagged as dead / redundant / stub / needs-rewrite" }));
  body.append(el("div", { className: "traj-section-explain", text:
    "Aggregates across all scrum iterations. A phrase hit = one time the reviewer used language like 'remove', 'duplicate', 'refactor', 'pseudocode', 'orphaned'. " +
    "Files near the top are the strongest refactor candidates — the scrum keeps calling them out. Click a row to jump to its per-iteration trace."
  }));
  const table = el("div", { className: "traj-table" });
  // forEach's index IS the rank (sigs arrives sorted; slice preserves
  // order) — avoids the O(n²) sigs.indexOf(s) per row the original did.
  sigs.slice(0, 30).forEach((s, idx) => {
    const line = el("div", { className: "traj-row" });
    line.append(el("div", { className: "traj-col-rank", text: String(idx + 1) }));
    line.append(el("div", { className: "traj-col-file", text: s.file }));
    line.append(el("div", { className: "traj-col-hits", text: `${s.hits}×` }));
    const topPhrases = Object.entries(s.phrases).sort((a, b) => b[1] - a[1]).slice(0, 3)
      .map(([p, n]) => `${p} (${n})`).join(", ");
    line.append(el("div", { className: "traj-col-phrases", text: topPhrases }));
    line.append(el("div", { className: "traj-col-iters", text: `${s.iterations} iter` }));
    line.addEventListener("click", () => jumpToTrace(s.file));
    table.append(line);
  });
  body.append(table);
  // SECTION 2 — per-file trajectory: top refactor candidates with their
  // score/conf delta across iterations inline.
  if (sigs.length) {
    body.append(el("div", { className: "traj-section-head", text: "SCORE TRAJECTORY — top refactor candidates" }));
    const grid = el("div", { className: "traj-spark-grid" });
    const top = sigs.slice(0, 6);
    // Fetch all histories in parallel (the original awaited one per loop
    // iteration); Promise.all preserves order so cards stay ranked.
    const histories = await Promise.all(top.map((s) =>
      fetch(`/data/file/${encodeURIComponent("/home/profit/lakehouse/" + s.file)}`)
        .then(r => r.json())
        .catch(() => ({ history: [] }))
    ));
    top.forEach((s, i) => {
      const card = el("div", { className: "traj-spark" });
      card.append(el("div", { className: "traj-spark-file", text: s.file }));
      const runs = histories[i].history ?? [];
      if (runs.length === 0) {
        card.append(el("div", { className: "traj-spark-empty", text: "no history" }));
      } else {
        // One point per run: score / confidence / iteration label.
        const line = el("div", { className: "traj-spark-line" });
        runs.forEach((h, j) => {
          const pt = el("div", { className: "traj-spark-pt" });
          pt.append(el("div", { className: "traj-pt-score", text: h.score != null ? `${h.score}/10` : "?" }));
          pt.append(el("div", { className: "traj-pt-conf", text: `${h.conf_avg ?? "-"}%` }));
          pt.append(el("div", { className: "traj-pt-label", text: `iter${j + 1}` }));
          line.append(pt);
          if (j < runs.length - 1) line.append(el("div", { className: "traj-spark-arrow", text: "→" }));
        });
        card.append(line);
        // Delta summary: first run vs last run.
        if (runs.length >= 2) {
          const first = runs[0], last = runs[runs.length - 1];
          const dScore = (last.score != null && first.score != null) ? (last.score - first.score) : null;
          const dConf = (last.conf_avg != null && first.conf_avg != null) ? (last.conf_avg - first.conf_avg) : null;
          const delta = el("div", { className: "traj-spark-delta" });
          if (dScore != null) delta.append(el("span", { text: `Δscore ${dScore > 0 ? "+" : ""}${dScore.toFixed(1)}`, className: dScore < 0 ? "delta-down" : dScore > 0 ? "delta-up" : "" }));
          if (dConf != null) delta.append(el("span", { text: ` · Δconf ${dConf > 0 ? "+" : ""}${dConf}%`, className: dConf > 0 ? "delta-up" : dConf < 0 ? "delta-down" : "" }));
          card.append(delta);
        }
      }
      card.addEventListener("click", () => jumpToTrace(s.file));
      grid.append(card);
    });
    body.append(grid);
  }
}
// ───── METRICS ─────
// One metric card. opts: { source, good, explain } —
//   source  = where the number comes from (data path)
//   good    = the "what's a healthy value" sentence
//   explain = one-line definition of what this counts
function metricBox(label, big, kind, opts = {}) {
  const box = el("div", { className: kind ? `metric ${kind}` : "metric" });
  box.append(el("div", { className: "m-label", text: label }));
  box.append(el("div", { className: "m-big", text: big }));
  if (opts.explain) box.append(el("div", { className: "m-sub m-explain", text: opts.explain }));
  if (opts.source) box.append(el("div", { className: "m-sub m-source", text: "SOURCE · " + opts.source }));
  if (opts.good) box.append(el("div", { className: "m-sub m-good", text: "GOOD · " + opts.good }));
  return box;
}
/**
 * Render the "metrics" view: clear and repopulate #metric-grid with one
 * card per headline stat. One section is async (pathway stats fetched
 * from /data/pathway_stats); the rest is built synchronously from the
 * already-polled `state.reviews` / `state.services` / `state.metrics`.
 */
function drawMetrics() {
  const grid = document.getElementById("metric-grid");
  clear(grid);
  // Kick off pathway fetch in parallel; render when it resolves so the
  // rest of the metrics grid appears immediately. The cards append to
  // the grid after the synchronous block below — they'll show up at
  // the bottom of the grid within a tick of first render.
  fetch("/data/pathway_stats").then(r => r.ok ? r.json() : null).then(j => {
    if (!j || !j.stats) return;
    const s = j.stats;
    const w = j.scrum_window ?? {};
    // Activity metric — is the hot-swap firing at all?
    grid.append(metricBox("pathway reuse rate", `${Math.round((w.pathway_reuse_rate ?? 0) * 100)}%`,
      (w.pathway_reuse_rate ?? 0) > 0.1 ? "good" : (w.pathway_reuse_rate ?? 0) > 0 ? "warn" : "bad", {
      explain: "% of recent reviews where a pathway hot-swap fired (narrow fingerprint match + 0.80 success rate + ≥3 replays + audit_consensus pass + 0.90 similarity).",
      source: `scrum_reviews.jsonl .pathway_hot_swap_hit over last ${w.reviews ?? 0} reviews (${w.hot_swap_hits ?? 0} hits)`,
      good: "≥10% sustained = index earning its keep. <10% over many iters = fingerprint too narrow or probation too strict. 0% on fresh install is expected (no replays yet).",
    }));
    // Value metric — how much compute did hot-swap actually save?
    const saved = w.avg_rungs_saved_per_commit ?? 0;
    grid.append(metricBox("avg rungs saved", saved.toFixed(2),
      saved >= 1 ? "good" : saved > 0 ? "warn" : "bad", {
      explain: "Average ladder rungs skipped per committed review by hot-swap. Rungs_saved = recommended_rung - 1 when the recommended model succeeded (otherwise 0).",
      source: "scrum_reviews.jsonl .rungs_saved averaged",
      good: "Every 1.0 here ≈ one less model call per review. At 21 files/iter, 1.0 saved = 21 cloud calls avoided. Value only counts when the replay actually succeeded.",
    }));
    // Stability metric — retired pathways indicate the learning loop is correcting itself.
    // NOTE(review): s.replay_success_rate is used without a null guard —
    // if /vectors/pathway/stats ever omits it this renders "NaN%".
    // Confirm the endpoint contract always includes the field.
    grid.append(metricBox("pathways tracked", String(s.total_pathways),
      s.total_pathways > 0 ? "good" : "warn", {
      explain: `Total pathway traces stored. ${s.retired} retired (below 0.80 success after ≥3 replays). ${s.with_audit_pass} audit-passed, eligible for hot-swap probation.`,
      source: "/vectors/pathway/stats",
      good: `Grows monotonically with scrum runs. Retired=${s.retired} is HEALTHY — it means the learning loop is pruning pathways that stopped working. replay_success_rate=${(s.replay_success_rate*100).toFixed(0)}% aggregates all historical replays.`,
    }));
  }).catch(() => {});
  // ── Synchronous cards: aggregates over the in-memory review rows. ──
  // Tally reviews per permission-gradient tier (unknown = missing field).
  const byTier = { auto:0, dry_run:0, simulation:0, block:0, unknown:0 };
  state.reviews.forEach(r => { const t = r.gradient_tier ?? "unknown"; if (byTier[t] != null) byTier[t]++; });
  // `|| 1` keeps the gradient-bar percentage math from dividing by zero.
  const total = state.reviews.length || 1;
  const confRows = state.reviews.filter(r => r.confidence_avg != null);
  const avg = confRows.length ? Math.round(confRows.reduce((a,r)=>a+r.confidence_avg,0)/confRows.length) : 0;
  const verdictCount = { pass:0, needs_patch:0, fail:0, unknown:0 };
  state.reviews.forEach(r => { const v=r.verdict??"unknown"; if(verdictCount[v]!=null) verdictCount[v]++; });
  // Sums across every review row (missing counts treated as 0).
  const findingsTotal = state.reviews.reduce((a,r)=>a+(r.findings_count??0),0);
  const critTotal = state.reviews.reduce((a,r)=>a+(r.critical_failures_count??0),0);
  const verTotal = state.reviews.reduce((a,r)=>a+(r.verified_components_count??0),0);
  // Per-subsystem stats blobs pulled from the last services poll.
  const usage = state.services?.subsystems?.find(n=>n.id==="usage")?.stats ?? {};
  const journal = state.services?.subsystems?.find(n=>n.id==="journal")?.stats ?? {};
  grid.append(metricBox("avg confidence", `${avg}%`, avg>=85?"good":avg>=70?"warn":"bad", {
    explain: "Self-assessed probability per suggestion, averaged across every review.",
    source: "scrum_reviews.jsonl .confidence_avg",
    good: "≥85% — model is confident. 70-84% routine. <70% means the scrum is uncertain and findings need human review.",
  }));
  grid.append(metricBox("scrum reviews", String(state.reviews.length), "good", {
    explain: "Every source file reviewed by the scrum master, across all iterations.",
    source: `${state.metrics.length} scrum runs tracked in scrum_loop_metrics.jsonl`,
    good: "Grows every run — 21 files × N iterations. Stall = pipeline broken.",
  }));
  grid.append(metricBox("critical failures", String(critTotal), critTotal>50?"bad":critTotal>10?"warn":"good", {
    explain: "Hard FAILs flagged by the forensic reviewer — pseudocode, fake implementations, unwired invariants. Each one is a concrete code-level gap.",
    source: "scrum_reviews.jsonl .critical_failures_count (forensic JSON format only)",
    good: "Trending DOWN each iteration = fixes are landing. Rising = new gaps surfacing faster than we close them.",
  }));
  grid.append(metricBox("verified components", String(verTotal), verTotal>0?"good":"warn", {
    explain: "What the scrum CONFIRMED is working — with file/line evidence. The inverse of critical_failures.",
    source: "scrum_reviews.jsonl .verified_components_count",
    good: "Trending UP = the system has more provably-real parts over time. Should grow as fixes land.",
  }));
  grid.append(metricBox("findings captured", String(findingsTotal), "good", {
    explain: "Total individual suggestions the scrum produced across all reviews (tables + JSON).",
    source: "scrum_reviews.jsonl .findings_count summed",
    good: "Higher = more scrutiny per file. Per-file average ≥10 means the review is substantive.",
  }));
  grid.append(metricBox("journal events", String(journal.total_events_created ?? 0), "good", {
    explain: "Mutation events recorded via ADR-012 append-only journal. Every ingest/delta-write should emit one.",
    source: "/journal/stats → total_events_created",
    good: "Should grow with ingest traffic. 1 = only a test probe fired; internal callers still unwired on most paths (P9-001).",
  }));
  grid.append(metricBox("v1 requests", String(usage.requests ?? 0), "good", {
    explain: "Calls through the Universal API /v1/chat endpoint (Phase 38). Captures all scrum + audit traffic.",
    source: `/v1/usage → requests. ${(usage.total_tokens ?? 0).toLocaleString()} tokens total`,
    good: "Every iteration adds ~21 requests. Stall = scrum paused OR callers bypassing the gateway (P44-style bypass).",
  }));
  // gradient bar — hand-built card (not metricBox) so it can embed the
  // stacked tier bar; segment widths are percentages of `total`.
  const gb = el("div", { className: "metric" });
  gb.append(el("div", { className: "m-label", text: "permission gradient" }));
  gb.append(el("div", { className: "m-big", text: String(state.reviews.length) }));
  gb.append(el("div", { className: "m-sub m-explain", text: "Tiers the scrum's suggestions by confidence: how much auto-apply we can trust per file." }));
  const bar = el("div", { className: "bar" });
  bar.append(el("span", { className: "seg-auto", style: { width: `${100*byTier.auto/total}%` } }));
  bar.append(el("span", { className: "seg-dry_run", style: { width: `${100*byTier.dry_run/total}%` } }));
  bar.append(el("span", { className: "seg-simulation", style: { width: `${100*byTier.simulation/total}%` } }));
  bar.append(el("span", { className: "seg-block", style: { width: `${100*byTier.block/total}%` } }));
  gb.append(bar);
  gb.append(el("div", { className: "m-sub", text: `auto ${byTier.auto} · dry_run ${byTier.dry_run} · sim ${byTier.simulation} · block ${byTier.block}` }));
  gb.append(el("div", { className: "m-sub m-good", text:
    "AUTO (≥90%): ship the suggestion. DRY_RUN (70-89): apply then diff. SIMULATION (50-69): test first. BLOCK (<50): human review — the model doesn't trust itself."
  }));
  grid.append(gb);
  // verdict distribution card — counts of pass/needs_patch/fail verdicts.
  const vb = el("div", { className: "metric" });
  vb.append(el("div", { className: "m-label", text: "verdict distribution" }));
  vb.append(el("div", { className: "m-big", text: String(verdictCount.pass + verdictCount.needs_patch + verdictCount.fail) }));
  vb.append(el("div", { className: "m-sub m-explain", text: "Forensic audit verdict per file: pass = works, needs_patch = fixable gaps, fail = not trustable." }));
  vb.append(el("div", { className: "m-sub", text: `pass ${verdictCount.pass} · needs_patch ${verdictCount.needs_patch} · fail ${verdictCount.fail}` }));
  vb.append(el("div", { className: "m-sub m-source", text: "SOURCE · scrum_reviews.jsonl .verdict (forensic JSON only — markdown rows count as unknown)" }));
  grid.append(vb);
}
// ───── KB ─────
/**
 * Render the "knowledge base" view: one card per reviewed source file
 * (latest review only), preceded by an explanatory banner and a quick
 * stat line. Clicking a card selects the file and jumps to the trace view.
 */
function drawKB() {
  const grid = document.getElementById("kb-grid");
  clear(grid);
  // Explanatory banner — each iteration the scrum re-reviews every
  // target file and writes a row here. A card = one file's latest
  // review. Click to drill into its trace across all iterations.
  const banner = el("div", { className: "kb-banner" });
  banner.append(el("div", { className: "kb-banner-title", text: "KNOWLEDGE BASE — every source file reviewed by the scrum master" }));
  banner.append(el("div", { className: "kb-banner-body", text:
    "Each card below is the LATEST scrum review of one source file. The review itself lives in data/_kb/scrum_reviews.jsonl. " +
    "Fields: score (scrum's alignment rating, 1-10 vs PRD intent), conf (model's self-assessed confidence per suggestion, avg'd), " +
    "findings (# of suggestions), crit (critical_failures — hard FAILs found), verified (verified_components — what's confirmed working). " +
    "Pills show: permission gradient (can we trust auto-apply), verdict (pass/needs_patch/fail), output format (JSON = forensic, markdown = legacy). " +
    "Click a card to see its trace across all iterations (iter 1 → iter N) and watch scores trend."
  }));
  grid.append(banner);
  // Dedupe to one review per file. Map insertion: the LAST row written
  // for a file wins — assumes state.reviews is ordered oldest→newest so
  // "last" means "latest review". TODO confirm the feed's ordering.
  const byFile = new Map();
  state.reviews.forEach(r => { if (r.file) byFile.set(r.file, r); });
  // Cards sorted by confidence, highest first (missing conf sorts last).
  const rows = [...byFile.values()].sort((a,b) => (b.confidence_avg??0) - (a.confidence_avg??0));
  // Quick stats above the cards
  const statLine = el("div", { className: "kb-statline" });
  const avgConf = rows.length ? Math.round(rows.reduce((a,r)=>a+(r.confidence_avg??0),0) / rows.length) : 0;
  const scoreMean = rows.filter(r=>r.alignment_score!=null);
  const avgScore = scoreMean.length ? (scoreMean.reduce((a,r)=>a+r.alignment_score,0) / scoreMean.length).toFixed(1) : "?";
  const blockCount = rows.filter(r => r.gradient_tier === "block").length;
  statLine.append(el("span", { text: `${rows.length} files tracked` }));
  statLine.append(el("span", { text: `mean score ${avgScore}/10` }));
  statLine.append(el("span", { text: `mean confidence ${avgConf}%` }));
  statLine.append(el("span", { text: `${blockCount} blocked (need human review)`, className: blockCount > 0 ? "stat-warn" : "" }));
  grid.append(statLine);
  rows.forEach(r => {
    const card = el("div", { className: "kb-file", data: { file: r.file } });
    card.append(el("div", { className: "kf-path", text: r.file }));
    // Meta row: score / confidence / findings / crit / verified / model.
    const meta = el("div", { className: "kf-meta" });
    const scoreSpan = el("span", { className: "kf-score", text: `${r.alignment_score ?? "?"}/10` });
    scoreSpan.title = "Scrum's alignment score (1-10) — how well this file matches PRD intent. Lower = more gaps.";
    meta.append(scoreSpan);
    const confSpan = el("span", { text: `conf ${r.confidence_avg ?? "-"}%` });
    confSpan.title = "Average self-confidence across suggestions. <70% = model uncertain, treat carefully.";
    meta.append(confSpan);
    const findingsSpan = el("span", { text: `${r.findings_count ?? 0} findings` });
    findingsSpan.title = "Total suggestions in this review (table rows or JSON array entries).";
    meta.append(findingsSpan);
    const critSpan = el("span", { text: `${r.critical_failures_count ?? 0} crit` });
    critSpan.title = "Critical failures: pseudocode, fake implementations, unwired invariants. Hard FAILs.";
    if ((r.critical_failures_count ?? 0) > 0) critSpan.style.color = "var(--red)";
    meta.append(critSpan);
    const verSpan = el("span", { text: `${r.verified_components_count ?? 0} verified` });
    verSpan.title = "Verified components: things the scrum CONFIRMED work, with file/line evidence.";
    if ((r.verified_components_count ?? 0) > 0) verSpan.style.color = "var(--green)";
    meta.append(verSpan);
    meta.append(el("span", { text: (r.accepted_model ?? "").split("/").pop(), attrs: { title: "Which model produced this review" } }));
    card.append(meta);
    // Pill row: gradient tier, verdict, output format (each optional).
    const pills = el("div", { className: "kf-meta" });
    if (r.gradient_tier) {
      const p = el("span", { className: `pill tier-${r.gradient_tier}`, text: r.gradient_tier });
      p.title = ({
        auto: "AUTO — confidence ≥90%, suggestions safe to apply automatically",
        dry_run: "DRY_RUN — confidence 70-89%, apply then review the diff",
        simulation: "SIMULATION — confidence 50-69%, test in sandbox first",
        block: "BLOCK — confidence <50%, requires human review, do not auto-apply",
      })[r.gradient_tier] ?? r.gradient_tier;
      pills.append(p);
    }
    if (r.verdict) {
      const p = el("span", { className: `pill ver-${r.verdict}`, text: r.verdict });
      p.title = ({
        pass: "PASS — scrum confirms this file meets its PRD intent",
        needs_patch: "NEEDS_PATCH — gaps exist but are fixable; scrum has concrete suggestions",
        fail: "FAIL — file cannot be trusted for its claimed purpose without structural changes",
      })[r.verdict] ?? r.verdict;
      pills.append(p);
    }
    if (r.output_format) {
      const p = el("span", { className: `pill fmt-${r.output_format}`, text: r.output_format });
      p.title = r.output_format === "forensic_json"
        ? "FORENSIC_JSON — structured output with verdict/critical/verified/missing fields. Richer signal."
        : "MARKDOWN — legacy tabular output. Lower structure; we only extract confidence scalars from these.";
      pills.append(p);
    }
    card.append(pills);
    // Clicking a card selects this file and switches to the trace view.
    card.addEventListener("click", () => {
      state.selected = { type: "file", id: r.file };
      renderContext();
      document.querySelector('#views button[data-view="trace"]').click();
    });
    grid.append(card);
  });
}
// ───── CONSOLE ─────
// Persist the active console tab on `state` so the selection survives
// poll-driven redraws.
state.consoleSvc = "gateway";
// Wire the tab buttons exactly once at startup: a click moves the "on"
// highlight, records the chosen service, and redraws the console pane.
for (const tabBtn of document.querySelectorAll("#con-tabs button")) {
  tabBtn.addEventListener("click", () => {
    document.querySelectorAll("#con-tabs button").forEach((other) => {
      other.classList.remove("on");
    });
    tabBtn.classList.add("on");
    state.consoleSvc = tabBtn.dataset.svc;
    drawConsole();
  });
}
/**
 * Render the console pane for the currently selected tab
 * (`state.consoleSvc`): either the aggregate summary or the last 120
 * lines of one service's log fetched from /data/logs/<svc>.
 * Fix vs. original: no longer mixes `await` with a `.then()` chain in a
 * single expression, and the per-line severity regexes are built once
 * per call instead of once per log line.
 */
async function drawConsole() {
  const log = document.getElementById("console-log");
  clear(log);
  const unit = document.getElementById("con-unit");
  if (unit) unit.textContent = "";
  if (state.consoleSvc === "summary") {
    drawConsoleSummary(log);
    return;
  }
  // Per-service log tail
  const svc = state.consoleSvc;
  // Severity classifiers, hoisted out of the per-line loop. Checked in
  // priority order: error beats warn beats ok. No /g flag, so .test()
  // is stateless and safe to reuse.
  const errRe = /\berror\b|\bERROR\b|panic|\[ERROR|failed/;
  const warnRe = /\bwarn\b|\bWARN\b|\bwarning\b|\[WARN/;
  const okRe = /\baccepted\b|\bok\b|\bOK\b|success|complete|ready/;
  try {
    const resp = await fetch(`/data/logs/${svc}?n=120`);
    // Parse unconditionally (no resp.ok check): the endpoint reports
    // failures as a JSON `error` field, handled just below.
    const res = await resp.json();
    if (unit && res.unit) unit.textContent = `unit · ${res.unit}`;
    if (res.error) {
      log.append(lineInfo(`[error] ${res.error}`, "cl-err"));
      return;
    }
    const lines = res.lines ?? [];
    if (!lines.length) { log.append(lineInfo("(no log lines — unit may have just started)", "cl-info")); return; }
    lines.forEach(l => {
      const cls = errRe.test(l) ? "cl-err"
        : warnRe.test(l) ? "cl-warn"
        : okRe.test(l) ? "cl-ok"
        : "cl-info";
      log.append(lineInfo(l, cls));
    });
    // autoscroll to bottom
    log.scrollTop = log.scrollHeight;
  } catch (e) {
    // Network failure or non-JSON body — surface it inline in the pane.
    log.append(lineInfo(`[fetch-error] ${e}`, "cl-err"));
  }
}
// Build one console log line: a div carrying the base `cl-line` class
// plus the given colour class (cl-info / cl-ok / cl-warn / cl-err).
function lineInfo(text, cls) {
  return el("div", { className: `cl-line ${cls}`, text });
}
/**
 * Render the aggregate "summary" console tab into `log`: service health,
 * subsystem stat blobs, the last six human overrides, and accumulated
 * per-model trust counters.
 */
function drawConsoleSummary(log) {
  // Each helper appends one coloured line directly to the pane.
  const emit = (cls) => (text) => log.append(lineInfo(text, cls));
  const info = emit("cl-info");
  const ok = emit("cl-ok");
  const warn = emit("cl-warn");
  const err = emit("cl-err");
  info(`# Lakehouse VCP · ${new Date().toLocaleTimeString()}`);
  info(`# Services`);
  for (const n of state.services?.nodes ?? []) {
    const line = `[${String(n.status).padEnd(8)}] ${n.label}`;
    if (n.status === "healthy") ok(line);
    else if (n.status === "down") err(line);
    else warn(line);
  }
  info(`# Subsystems`);
  for (const s of state.services?.subsystems ?? []) {
    info(`  ${String(s.id).padEnd(10)} ${JSON.stringify(s.stats ?? {}).slice(0, 120)}`);
  }
  info(`# Recent overrides (layer 10)`);
  for (const o of state.overrides.slice(-6)) {
    warn(`  [${o.ts}] ${o.task_signature}: ${o.human_fix}`);
  }
  info(`# Model trust accumulated`);
  // Fold trust rows into per-model tallies (missing counts treated as 0).
  const byModel = {};
  for (const t of state.trust) {
    const key = t.accepted_model ?? "?";
    const row = (byModel[key] ??= { accepts: 0, thin: 0, attempts: 0 });
    row.accepts += 1;
    row.thin += t.thin_rejections ?? 0;
    row.attempts += t.attempts_made ?? 0;
  }
  for (const [model, tally] of Object.entries(byModel)) {
    info(`  ${String(model).padEnd(48)} accepts=${tally.accepts} thin=${tally.thin} attempts=${tally.attempts}`);
  }
}
// ───── boot ─────
// Paint once immediately, then refresh on the polling cadence.
poll();
setInterval(poll, POLL_MS);
// Re-layout the service map on window resize, but only when it is the
// active view and service data has arrived.
window.addEventListener("resize", () => {
  if (!state.services || state.view !== "map") return;
  drawMap(state.services);
});