Closes the cycle J asked for: curated cloud output lands structured knowledge in the KB, so future audits have architectural context, not just a log of per-finding signatures. Three pieces:

1. Inference curation (tree-split) — when a diff exceeds 30KB, shard it at 4.5KB, summarize each shard via cloud (temp=0, think=false on small shards; think=true on the main call), and merge the summaries into a scratchpad. Cloud verification then runs against the scratchpad, not a truncated raw diff. This eliminates the 40KB MAX_DIFF_CHARS truncation path for large PRs (PR #8 is 102KB — we were losing 62KB). An anti-false-positive guard in the prompt tells the cloud model that absence from the scratchpad is NOT absence from the diff, so it doesn't flag curated-out symbols as missing. The unflagged_gaps section is dropped entirely when curated (the scratchpad can't ground those gaps). Sketched below, after this message.

2. fact_extractor — a TS client for llm_team_ui's extract-facts mode at localhost:5000/api/run. It sends the curated scratchpad through the qwen2.5 extractor + gemma2 verifier, parses the SSE stream, and returns a structured {facts, entities, relationships, verification, llm_team_run_id}. Best-effort: if llm_team is down, extraction fails silently and the audit still completes. It is AWAITED so CLI tools (audit_one.ts) don't exit before extraction lands — the systemd poller has 90s of headroom, so the extra ~15s doesn't matter.

3. audit_facts.jsonl + checkAuditFacts() — one row per curated audit with the extraction result. kb_query tails the jsonl, explodes the entity rows, aggregates by entity name with distinct-PR counting, and surfaces entities recurring in 2+ PRs as info findings. It filters out short names (<3 chars, extractor truncation artifacts) and generic types (string/number/etc.) so the signal isn't drowned. Also sketched below.

Verified end-to-end on PR #8: 102KB diff → 23 shards → 1KB scratchpad → qwen2.5 extracted 4 facts + 6 entities + 6 relationships (real code-level knowledge: the AggregateOptions<T> type, the aggregate<T> async function with its real signature, typed relationships). llm_team_run_id cross-references llm_team's own team_runs table.

Also: audit.ts passes (pr_number, head_sha) as InferenceContext, so extracted facts are scope-tagged for the KB index.
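Pieces 1 and 3 live outside this file (inference and kb_query respectively); minimal sketches of both follow, under assumptions: the thresholds and filters come from the message above, but every helper name here (curateDiff, summarizeShard, FactRow) is an illustrative stand-in, not the shipped code.

```typescript
// Piece 1, sketched: shard an oversized diff, summarize each shard via
// the cloud model, merge the summaries into a scratchpad. summarizeShard
// stands in for the real cloud call (temp=0, think=false per the message);
// the real implementation splits tree-wise, flat slicing keeps this short.
const CURATE_THRESHOLD = 30_000; // diff bytes before we shard
const SHARD_SIZE = 4_500;

async function curateDiff(
  diff: string,
  summarizeShard: (shard: string) => Promise<string>,
): Promise<string> {
  if (diff.length <= CURATE_THRESHOLD) return diff; // small diffs stay raw
  const shards: string[] = [];
  for (let i = 0; i < diff.length; i += SHARD_SIZE) {
    shards.push(diff.slice(i, i + SHARD_SIZE));
  }
  // e.g. PR #8: 102KB diff -> 23 shards -> ~1KB merged scratchpad
  const summaries = await Promise.all(shards.map(summarizeShard));
  return summaries.join("\n");
}
```

```typescript
// Piece 3, sketched: tail audit_facts.jsonl, explode entity rows, count
// distinct PRs per entity name, surface 2+ PR recurrences as info findings.
// FactRow is a simplified row shape, not the full persisted record.
import { readFileSync } from "node:fs";

interface FactRow { pr_number: number; entities: { name: string; type: string }[] }

const GENERIC_TYPES = new Set(["string", "number", "boolean", "object", "array", "any"]);

function checkAuditFacts(path = "audit_facts.jsonl"): string[] {
  const rows: FactRow[] = readFileSync(path, "utf8")
    .split("\n").filter(Boolean).map(line => JSON.parse(line));

  const prsByEntity = new Map<string, Set<number>>();
  for (const row of rows) {
    for (const e of row.entities ?? []) {
      // Drop truncation artifacts (<3 chars) and generic types.
      if (e.name.length < 3 || GENERIC_TYPES.has(e.type.toLowerCase())) continue;
      const prs = prsByEntity.get(e.name) ?? new Set<number>();
      prs.add(row.pr_number);
      prsByEntity.set(e.name, prs);
    }
  }

  // Entities recurring in 2+ distinct PRs become info findings.
  return [...prsByEntity.entries()]
    .filter(([, prs]) => prs.size >= 2)
    .map(([name, prs]) => `info: entity "${name}" recurs across ${prs.size} PRs`);
}
```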
// fact_extractor — routes curated TEXT through llm_team_ui's
// "knowledge extract facts" mode (mode=extract at /api/run).
//
// What it gives us: structured {facts, entities, relationships} from
// whatever curated blob we send. Auditor sends the tree-split
// inference scratchpad (the best distillation of what a PR changed).
// Scrum_master will later send its accepted review bodies.
//
// Why route through llm_team and not just extract directly from our
// own checks: llm_team's extract uses a local EXTRACTOR model
// (qwen2.5) + a separate VERIFIER (gemma2). This cross-check is the
// discipline J wants for knowledge going into the playbook — facts
// go in only after a second model has rated them CORRECT /
// UNVERIFIABLE. Fast (local models, ~10-20s), free, and matches the
// codereview pattern J already trusts.
//
// SSE parsing: llm_team streams SSE events. We only care about three:
// the extraction response (role="extraction N"), the verifier response
// (role="verifier"), and the run_saved event (it carries the team_runs
// ID for cross-ref). Parse the JSON from the extractor's response text.
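//
// Illustrative SSE frames (shapes inferred from the parsing code below;
// the real server payloads may carry extra fields):
//
//   data: {"type":"response","role":"extraction 1","text":"{ \"facts\": [...] }"}
//   data: {"type":"response","role":"verifier","text":"CORRECT: ..."}
//   data: {"type":"run_saved","run_id":42}
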
const LLM_TEAM = process.env.LH_LLM_TEAM_URL ?? "http://localhost:5000";
const EXTRACTOR = process.env.LH_FACT_EXTRACTOR ?? "qwen2.5:latest";
const VERIFIER = process.env.LH_FACT_VERIFIER ?? "gemma2:latest";
const EXTRACT_TIMEOUT_MS = 120_000;

export interface Entity {
  name: string;
  type: string;
  description?: string;
}

export interface Relationship {
  from: string;
  to: string;
  type: string;
}

export interface ExtractedFacts {
  facts: string[];
  entities: Entity[];
  relationships: Relationship[];
  verification: string;
  extractor_model: string;
  verifier_model: string;
  source_preview: string;
  // Populated when the extract run completed server-side (llm_team
  // persists to its own team_runs; this is for our own cross-ref).
  llm_team_run_id?: number;
  extracted_at: string;
  error?: string;
}

/**
 * Run the llm_team extract pipeline on `source` text. Returns
 * structured {facts, entities, relationships}.
 *
 * Returns an object with `error` set if the pipeline failed — never
 * throws, because fact extraction is best-effort enrichment (the
 * primary audit must not break if llm_team is down).
 */
export async function extractFacts(source: string): Promise<ExtractedFacts> {
  const base: ExtractedFacts = {
    facts: [],
    entities: [],
    relationships: [],
    verification: "",
    extractor_model: EXTRACTOR,
    verifier_model: VERIFIER,
    source_preview: source.slice(0, 240),
    extracted_at: new Date().toISOString(),
  };

  let resp: Response;
  try {
    resp = await fetch(`${LLM_TEAM}/api/run`, {
      method: "POST",
      headers: { "content-type": "application/json" },
      body: JSON.stringify({
        mode: "extract",
        prompt: source,
        extractor: EXTRACTOR,
        verifier: VERIFIER,
        source: "prompt",
        skip_cache: true, // cache by prompt would dedup identical
                          // scratchpads, but we want fresh extraction
                          // for per-audit facts; cheap since local.
      }),
      signal: AbortSignal.timeout(EXTRACT_TIMEOUT_MS),
    });
  } catch (e) {
    return { ...base, error: `fetch failed: ${(e as Error).message}` };
  }

  if (!resp.ok) {
    const body = await resp.text().catch(() => "");
    return { ...base, error: `llm_team /api/run ${resp.status}: ${body.slice(0, 200)}` };
  }

  // Stream SSE lines; collect the one extraction response + the run_saved
  // event so we can capture the team_runs ID for cross-ref.
  const decoder = new TextDecoder();
  const reader = resp.body?.getReader();
  if (!reader) return { ...base, error: "no response body" };

  let buffer = "";
  let extractionText = "";
  let verifierText = "";
  let runId: number | undefined = undefined;

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      let nl: number;
      while ((nl = buffer.indexOf("\n\n")) >= 0) {
        const chunk = buffer.slice(0, nl);
        buffer = buffer.slice(nl + 2);
        const dataLine = chunk.split("\n").find(l => l.startsWith("data: "));
        if (!dataLine) continue;
        try {
          const ev = JSON.parse(dataLine.slice(6));
          if (ev.type === "response") {
            const role = String(ev.role ?? "");
            if (role.startsWith("extraction")) extractionText = String(ev.text ?? "");
            else if (role === "verifier") verifierText = String(ev.text ?? "");
          } else if (ev.type === "run_saved") {
            const id = Number(ev.run_id);
            if (Number.isFinite(id)) runId = id;
          }
        } catch { /* skip malformed SSE */ }
      }
    }
  } catch (e) {
    return { ...base, error: `SSE read failed: ${(e as Error).message}` };
  }

  // Pull the JSON object out of extractionText (may be wrapped in ```json fences).
  const parsed = extractFirstJsonObject(extractionText);
  if (!parsed) {
    return { ...base, error: "extractor returned no parseable JSON", verification: verifierText };
  }

  return {
    ...base,
    facts: Array.isArray(parsed.facts) ? parsed.facts.slice(0, 50).map(String) : [],
    entities: Array.isArray(parsed.entities)
      ? parsed.entities.slice(0, 30).map((e: any) => ({
          name: String(e?.name ?? ""),
          type: String(e?.type ?? ""),
          description: typeof e?.description === "string" ? e.description.slice(0, 240) : undefined,
        })).filter(e => e.name.length > 0)
      : [],
    relationships: Array.isArray(parsed.relationships)
      ? parsed.relationships.slice(0, 30).map((r: any) => ({
          from: String(r?.from ?? ""),
          to: String(r?.to ?? ""),
          type: String(r?.type ?? ""),
        })).filter(r => r.from.length > 0 && r.to.length > 0)
      : [],
    verification: verifierText.slice(0, 1500),
    llm_team_run_id: runId,
  };
}
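
// Usage sketch (illustrative, not part of this module): how a caller
// like audit_one.ts might await extraction as best-effort enrichment.
// `scratchpad`, `pr_number`, `head_sha`, and appendJsonl are hypothetical
// stand-ins for the caller's own context.
//
//   const facts = await extractFacts(scratchpad); // awaited so CLI tools
//   if (facts.error) {                            // don't exit too early
//     console.warn(`fact extraction skipped: ${facts.error}`);
//   } else {
//     appendJsonl("audit_facts.jsonl", { pr_number, head_sha, ...facts });
//   }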

// Lift the first balanced JSON object out of (possibly fenced) text.
// Same discipline as inference.ts::extractJson.
function extractFirstJsonObject(text: string): any | null {
  const cleaned = text.replace(/^```(?:json)?\s*/im, "").replace(/```\s*$/im, "");
  let depth = 0, start = -1;
  for (let i = 0; i < cleaned.length; i++) {
    const c = cleaned[i];
    if (c === "{") { if (depth === 0) start = i; depth++; }
    else if (c === "}") {
      depth--;
      if (depth === 0 && start >= 0) {
        try { return JSON.parse(cleaned.slice(start, i + 1)); } catch { start = -1; }
      }
    }
  }
  return null;
}