auditor/checks/dynamic.ts — wraps runHybridFixture, maps layer results to Findings. Placeholder-style errors (404 / unimplemented / slice N) → info; other failures → warn. Always emits a summary finding with real numbers (shipped/placeholder phase counts + per-layer latency). Live-tested against current stack: 2 info findings, 0 warnings — all shipped layers actually work. auditor/checks/inference.ts — wraps the run_codereview reviewer pattern from llm_team_ui.py, adapted for claim-vs-diff verification. Calls /v1/chat provider=ollama_cloud model=gpt-oss:120b. Requests strict JSON response with claim_verdicts[] and unflagged_gaps[]. A strong claim marked "not backed" by cloud → BLOCK severity; moderate → warn; weak → info. Cloud-unreachable or unparseable-output → info (never blocks on the reviewer being down). Live-tested against PR #1 (this PR, 20 claims, 39KB diff): - 36.9s round-trip - 7 block + 23 warn + 2 info findings - gpt-oss:120b correctly flagged "Fully-functional auditor (tasks 1-9 complete)" as not-backed (only 6/10 tasks done at that commit) — accurate catch - Some false positives from the original 15KB truncation threshold (cloud missed gitea.ts, flagged "no Gitea client present") - Bumped MAX_DIFF_CHARS from 15000 to 40000 to fit the full PR diff in context; reviewer precision improves accordingly Tasks 5 + 6 completed. Remaining: #7 (KB query), #8 (verdict + Gitea poster), #9 (poller), #10 (end-to-end proof), #12 (upsert UPDATE-drops-doc_refs).
92 lines
3.2 KiB
TypeScript
// Dynamic execution check — runs the hybrid fixture and maps its
// layer results to auditor Findings.
//
// A layer that fails with a "not implemented / 404 / slice N" error
// gets severity=info (honest placeholder signal). A layer that fails
// any other way gets severity=warn (something actually broke).
// An info-level summary finding is always emitted carrying the real
// numbers — shipped/placeholder phase counts, per-layer latency.

import { runHybridFixture } from "../fixtures/hybrid_38_40_45.ts";
|
|
import type { Finding } from "../types.ts";
|
|
|
|
const PLACEHOLDER_MARKERS = [
|
|
"unimplemented",
|
|
" 404 ", "(404)", " 405 ", "(405)",
|
|
"slice 3", "slice 4", "slice 5",
|
|
"endpoint not built", "not yet",
|
|
];
|
|
|
|
function isPlaceholderFailure(err?: string): boolean {
|
|
if (!err) return false;
|
|
const low = err.toLowerCase();
|
|
return PLACEHOLDER_MARKERS.some(m => low.includes(m.toLowerCase()));
|
|
}
|
|
|
|
export async function runDynamicCheck(): Promise<Finding[]> {
|
|
const findings: Finding[] = [];
|
|
|
|
let result;
|
|
try {
|
|
result = await runHybridFixture();
|
|
} catch (e) {
|
|
// Fixture itself crashed — can't run dynamic check at all.
|
|
return [
|
|
{
|
|
check: "dynamic",
|
|
severity: "warn",
|
|
summary: `hybrid fixture crashed before completing: ${(e as Error).message.slice(0, 140)}`,
|
|
evidence: [(e as Error).message],
|
|
},
|
|
];
|
|
}
|
|
|
|
// Per-layer findings for every non-ok layer.
|
|
for (const layer of result.layers) {
|
|
if (layer.ok) continue;
|
|
const placeholder = isPlaceholderFailure(layer.error);
|
|
findings.push({
|
|
check: "dynamic",
|
|
severity: placeholder ? "info" : "warn",
|
|
summary: placeholder
|
|
? `hybrid fixture layer ${layer.layer} (Phase ${layer.phase}) honestly reports unimplemented`
|
|
: `hybrid fixture layer ${layer.layer} (Phase ${layer.phase}) failed — not a placeholder, a real failure`,
|
|
evidence: [
|
|
`evidence: ${layer.evidence.slice(0, 160)}`,
|
|
...(layer.error ? [`error: ${layer.error.slice(0, 160)}`] : []),
|
|
`latency_ms: ${layer.latency_ms}`,
|
|
],
|
|
});
|
|
}
|
|
|
|
// One overall summary with real numbers so the report shows what
|
|
// DID pass plus per-layer timing.
|
|
const metrics_preview = Object.entries(result.real_numbers)
|
|
.slice(0, 10)
|
|
.map(([k, v]) => `${k}=${v}`);
|
|
findings.push({
|
|
check: "dynamic",
|
|
severity: "info",
|
|
summary: `hybrid fixture overall=${result.overall}, shipped [${result.shipped_phases.join(", ")}], placeholder [${result.placeholder_phases.join(", ")}]`,
|
|
evidence: metrics_preview.length > 0 ? metrics_preview : ["no metrics emitted"],
|
|
});
|
|
|
|
// If the fixture ran at all but nothing passed, elevate one of the
|
|
// summary findings to warn — something more than "all honest
|
|
// placeholders" is wrong.
|
|
if (result.overall === "fail") {
|
|
findings.push({
|
|
check: "dynamic",
|
|
severity: "warn",
|
|
summary: `hybrid fixture: 0 layers passed (overall=fail)`,
|
|
evidence: [
|
|
"a total fixture fail usually means a precondition service is down",
|
|
"(gateway /health / sidecar / Langfuse /v1/chat) — NOT necessarily",
|
|
"the PR's code problem. Check service status before blaming the PR.",
|
|
],
|
|
});
|
|
}
|
|
|
|
return findings;
|
|
}
|