From 56dbfb7d0314995294ff420623716ac3fbac400f Mon Sep 17 00:00:00 2001
From: profit
Date: Thu, 23 Apr 2026 00:26:01 -0500
Subject: [PATCH] fact_extractor: project context + verifier-verdict parser fix
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Two bundled changes. Both came out of J's observation that the verifier
was defaulting to UNVERIFIABLE on domain-specific facts because it had
no idea what Lakehouse was, which project's code it was reading, or what
framework the types belonged to.

1. Project context preamble. Added docs/AUDITOR_CONTEXT.md — a concise,
   <400-word description of the project (crates, services, architecture
   phases, the auditor's role itself). fact_extractor reads it once,
   caches it, and prepends it to the extract prompt as a "PROJECT CONTEXT
   (for grounding; do NOT extract from this)" section. Both extractor and
   verifier now see this context, so a statement like "aggregate returns
   Map" gets grounded as "this is a TypeScript function in the Lakehouse
   auditor subsystem" and the verifier can reason about plausibility
   instead of guessing.

2. Verifier-verdict parser fix. Gemma2's output format varies between
   "**Verdict:** CORRECT" and just "* **CORRECT**" inline (observed
   variance across runs). The old regex required "Verdict:" as a label
   and missed the second format, so every verdict stayed UNCHECKED.
   Replaced it with a two-pass approach: find each fact section start
   ("**N.**" or "N."), slice to the next section, and scan the slice for
   the first CORRECT|INCORRECT|UNVERIFIABLE token. Handles both formats
   plus an unfenced fallback.

Verified: a 4-fact test extraction went from 0/4 verdicts scored
(pre-fix) to 2/4 CORRECT + 2/4 UNVERIFIABLE (post-fix). The 2
UNVERIFIABLE cases are domain-specific code behavior the verifier
legitimately can't confirm without reading source — the correct stance,
not a parser miss.

No new consensus modes yet. J suggested adding codereview or validator
as a second pass; holding until we see whether context injection alone
gives sufficient signal lift.
---
 auditor/fact_extractor.ts | 68 ++++++++++++++++++++++++++++++--------
 docs/AUDITOR_CONTEXT.md   | 69 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 123 insertions(+), 14 deletions(-)
 create mode 100644 docs/AUDITOR_CONTEXT.md

diff --git a/auditor/fact_extractor.ts b/auditor/fact_extractor.ts
index 19dd9b0..1352a4c 100644
--- a/auditor/fact_extractor.ts
+++ b/auditor/fact_extractor.ts
@@ -23,6 +23,23 @@
 const LLM_TEAM = process.env.LH_LLM_TEAM_URL ?? "http://localhost:5000";
 const EXTRACTOR = process.env.LH_FACT_EXTRACTOR ?? "qwen2.5:latest";
 const VERIFIER = process.env.LH_FACT_VERIFIER ?? "gemma2:latest";
 const EXTRACT_TIMEOUT_MS = 120_000;
+const PROJECT_CONTEXT_FILE = process.env.LH_AUDITOR_CONTEXT_FILE
+  ?? "/home/profit/lakehouse/docs/AUDITOR_CONTEXT.md";
+
+let cachedContext: string | null = null;
+async function loadProjectContext(): Promise<string> {
+  if (cachedContext !== null) return cachedContext;
+  try {
+    const { readFile } = await import("node:fs/promises");
+    const raw = await readFile(PROJECT_CONTEXT_FILE, "utf8");
+    // Cap at 4KB — anything past that is more noise than signal for
+    // the extractor/verifier's attention budget.
+    cachedContext = raw.slice(0, 4000);
+  } catch {
+    cachedContext = ""; // context file missing → extractor runs without preamble
+  }
+  return cachedContext;
+}
 
 export interface Entity {
   name: string;
@@ -77,6 +94,16 @@ export async function extractFacts(source: string): Promise {
     extracted_at: new Date().toISOString(),
   };
 
+  // Prepend project context to the source so the extractor + verifier
+  // know what codebase/framework these facts belong to. Without this,
+  // the verifier marks most domain-specific facts as UNVERIFIABLE ("I
+  // don't know what Lakehouse is"). With it, the verifier can CORRECT-
+  // stamp facts that align with the stated architecture.
+  const context = await loadProjectContext();
+  const prompt = context.length > 0
+    ? `=== PROJECT CONTEXT (for grounding facts; do NOT extract facts from this section) ===\n${context}\n\n=== CONTENT TO EXTRACT FACTS FROM ===\n${source}`
+    : source;
+
   let resp: Response;
   try {
     resp = await fetch(`${LLM_TEAM}/api/run`, {
@@ -84,7 +111,7 @@
       headers: { "content-type": "application/json" },
       body: JSON.stringify({
         mode: "extract",
-        prompt: source,
+        prompt,
         extractor: EXTRACTOR,
         verifier: VERIFIER,
         source: "prompt",
@@ -189,25 +216,38 @@
 }
 
 // Parse verifier's free-form output into a per-fact verdict array.
-// The verifier output typically looks like:
-// **1.** The claim...
-// * **Verdict:** CORRECT
-// **2.** ...
-// **Verdict:** UNVERIFIABLE
-// Using matchAll to iterate — returns a verdict array of length
-// numFacts; unmatched positions stay UNCHECKED.
+// Gemma2 uses several formats depending on prompt mood:
+// Format A: **1.** claim...  * **Verdict:** CORRECT
+// Format B: **1.** claim...  * **CORRECT**  (no "Verdict:" label)
+// Format C: 1. claim...  CORRECT
+// Strategy: split on fact numbers, then find the first
+// CORRECT|INCORRECT|UNVERIFIABLE token in each section. Handles all
+// three formats without regex gymnastics.
 function parseVerifierVerdicts(
   verifierText: string,
   numFacts: number,
 ): Array<"CORRECT" | "INCORRECT" | "UNVERIFIABLE" | "UNCHECKED"> {
   const out: Array<"CORRECT" | "INCORRECT" | "UNVERIFIABLE" | "UNCHECKED"> =
     Array(numFacts).fill("UNCHECKED");
-  const re = /(?:\*\*|#+\s*)?(\d+)[.):]\s[\s\S]*?\bVerdict\s*:\s*\*?\*?\s*(CORRECT|INCORRECT|UNVERIFIABLE)/gi;
-  for (const m of verifierText.matchAll(re)) {
-    const idx = Number(m[1]) - 1;
-    if (idx >= 0 && idx < numFacts) {
-      out[idx] = m[2].toUpperCase() as "CORRECT" | "INCORRECT" | "UNVERIFIABLE";
-    }
+  if (!verifierText) return out;
+
+  // Find each fact section start — "**N.**" or "N." at line start —
+  // and slice out the content up to the NEXT fact number. Each section
+  // gets scanned for the first CORRECT/INCORRECT/UNVERIFIABLE token.
+  const starts: Array<{ idx: number; pos: number }> = [];
+  const header = /(?:^|\n)\s*(?:\*\*)?(\d+)[.)]/g;
+  for (const m of verifierText.matchAll(header)) {
+    const factNum = Number(m[1]);
+    if (!Number.isFinite(factNum)) continue;
+    starts.push({ idx: factNum - 1, pos: m.index! });
+  }
+  for (let i = 0; i < starts.length; i++) {
+    const s = starts[i];
+    const end = i + 1 < starts.length ? starts[i + 1].pos : verifierText.length;
+    if (s.idx < 0 || s.idx >= numFacts) continue;
+    const section = verifierText.slice(s.pos, end);
+    const v = section.match(/\b(CORRECT|INCORRECT|UNVERIFIABLE)\b/i);
+    if (v) out[s.idx] = v[1].toUpperCase() as "CORRECT" | "INCORRECT" | "UNVERIFIABLE";
   }
   return out;
 }

diff --git a/docs/AUDITOR_CONTEXT.md b/docs/AUDITOR_CONTEXT.md
new file mode 100644
index 0000000..98ba091
--- /dev/null
+++ b/docs/AUDITOR_CONTEXT.md
@@ -0,0 +1,69 @@
+# Auditor Context — project preamble for fact extraction
+
+This file is read by `auditor/fact_extractor.ts` and prepended to the
+extract-facts prompt sent to llm_team. The goal: give the extractor +
+verifier enough grounding to assess domain-specific facts instead of
+marking them UNVERIFIABLE by default.
+
+Keep this short (< 400 words). The verifier only reads the first ~4KB
+of the prompt alongside the facts. Longer = noise, not signal.
+
+Update when: a new Phase lands, a crate is added/removed, or the
+project's primary domain shifts (e.g. staffing → DevOps).
+
+---
+
+## What Lakehouse is
+
+Lakehouse is a Rust-first data platform over S3-compatible object
+storage. Primary use: a staffing company ingesting legacy CRM data for
+AI-powered worker matching, contract fulfillment, and playbook-driven
+coordination.
+
+Architecture: 13 Rust crates + a Python sidecar (Ollama) + TypeScript
+sub-agents (auditor, scrum_master, bot). Runs on a single server
+(Nvidia A4000, 128GB RAM). All services on localhost: gateway :3100,
+sidecar :3200, UI :3300, MCP :3700, observer :3800, MinIO :9000.
+
+## Key crates (each maps to a responsibility)
+
+- **shared** — types, Arrow helpers, PII utilities, SecretsProvider
+- **proto** — gRPC definitions
+- **storaged** — S3/MinIO I/O, AppendLog, ErrorJournal
+- **catalogd** — metadata authority (manifests, views, tombstones)
+- **queryd** — DataFusion SQL, MemTable cache, compaction
+- **ingestd** — CSV/JSON/PDF/Postgres/MySQL ingest
+- **vectord** — embeddings, HNSW index, **playbook_memory meta-index** (Phase 19+)
+- **vectord-lance** — Lance 4.0 firewall crate (separate Arrow version)
+- **journald** — append-only mutation event log
+- **aibridge** — Rust↔Python sidecar bridge, context budget + continuation
+- **gateway** — Axum HTTP :3100 + gRPC :3101 (Phase 38+ adds /v1/chat)
+- **ui** — Dioxus WASM (stale, pre-Phase-9)
+- **lance-bench** — standalone benchmark
+
+## Current architectural direction (Phases 38-44)
+
+Universal AI Control Plane: a `/v1/chat` OpenAI-compatible API that
+routes all LLM traffic through one layer for token accounting + provider
+fallback. Truth Layer + Validation Pipeline enforce staffing-domain
+invariants (worker eligibility, PII, contract rules). The Auditor
+(Phase A of the cohesion plan) hard-blocks PR merges on placeholder code.
+
+## Auditor sub-agent role
+
+`auditor/` (TypeScript, Bun runtime) polls Gitea every 90s for open PRs.
+For each fresh head SHA it runs 4 checks in parallel: static (grep-style
+placeholder detection), dynamic (runs the hybrid fixture), inference
+(gpt-oss:120b cloud review with N=3 consensus + qwen3-coder:480b
+tie-breaker), and kb_query (reads `data/_kb/*.jsonl` for prior evidence).
+Verdicts post to Gitea as commit status + review comment. Findings
+append to `data/_kb/audit_lessons.jsonl` (path-agnostic signatures for
+dedup). Curated scratchpads from tree-split get routed through this
+extract-facts pipeline to populate `audit_facts.jsonl` — which is what
+you (the extractor) are currently producing.
+
+## Things that are NOT the auditor
+
+- The LLM Team UI at `/root/llm_team_ui.py` (devop.live:5000) — a separate
+  product for human-facing multi-model experimentation
+- The scrum_master pipeline at `tests/real-world/scrum_master_pipeline.ts` —
+  reviews files, not claims
+- The bot at `bot/` — will apply fixes, doesn't audit
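
Post-diff illustration (not part of the patch): a standalone sketch of how the
two-pass parser reads the Format A and Format B replies described in the commit
message. The sample verifier output below is invented for illustration, the
names parseVerdictsSketch and Verdict are made up here, and the logic is a
trimmed re-inline of parseVerifierVerdicts (the function is not exported from
fact_extractor.ts), so treat it as a sketch rather than a test fixture. It can
be run directly with Bun and should print [ "CORRECT", "UNVERIFIABLE" ].

// Sketch only: trimmed copy of the two-pass verdict parser, fed an invented
// verifier reply that mixes Format A and Format B.
type Verdict = "CORRECT" | "INCORRECT" | "UNVERIFIABLE" | "UNCHECKED";

function parseVerdictsSketch(verifierText: string, numFacts: number): Verdict[] {
  const out: Verdict[] = Array(numFacts).fill("UNCHECKED");
  if (!verifierText) return out;

  // Pass 1: locate each fact section start ("**N.**" or "N." at line start).
  const starts: Array<{ idx: number; pos: number }> = [];
  for (const m of verifierText.matchAll(/(?:^|\n)\s*(?:\*\*)?(\d+)[.)]/g)) {
    starts.push({ idx: Number(m[1]) - 1, pos: m.index! });
  }

  // Pass 2: slice each section and keep the first verdict token found in it.
  for (let i = 0; i < starts.length; i++) {
    const { idx, pos } = starts[i];
    if (idx < 0 || idx >= numFacts) continue;
    const end = i + 1 < starts.length ? starts[i + 1].pos : verifierText.length;
    const v = verifierText.slice(pos, end).match(/\b(CORRECT|INCORRECT|UNVERIFIABLE)\b/i);
    if (v) out[idx] = v[1].toUpperCase() as Verdict;
  }
  return out;
}

// Format A (labelled verdict) for fact 1, Format B (bare bold token) for fact 2.
const sample = [
  "**1.** The claim matches the stated gateway architecture.",
  "* **Verdict:** CORRECT",
  "**2.** The claim depends on code behavior I cannot confirm from the prompt.",
  "* **UNVERIFIABLE**",
].join("\n");

console.log(parseVerdictsSketch(sample, 2)); // [ "CORRECT", "UNVERIFIABLE" ]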