Some checks failed
lakehouse/auditor 9 blocking issues: todo!() macro call in tests/real-world/scrum_master_pipeline.ts
Phase 0 — docs/recon/local-distillation-recon.md
Inventories the 23 KB JSONL streams + 20 vector corpora + auditor's
kb_index.ts as substrate for the now.md distillation pipeline. Maps
spec modules to existing producers, identifies real gaps, lists 9
schemas to formalize. ZERO implementation in recon — gating doc only.
Phase 1 — auditor/schemas/distillation/
9 schemas + foundation types + 48 tests passing in 502ms:
types.ts shared validators + canonicalSha256
evidence_record.ts EVIDENCE_SCHEMA_VERSION=1, ModelRole enum
scored_run.ts 4 categories pinned, anchor_grounding ∈ [0,1]
receipt.ts git_sha 40-char, sha256 file refs, validation_pass:bool
playbook.ts non-empty source_run_ids + acceptance_criteria
scratchpad_summary.ts validation_status enum, hash sha256
model_ledger.ts success_rate ∈ [0,1], sample_count ≥ 1
rag_sample.ts success_score ∈ {accepted, partially_accepted}
sft_sample.ts quality_score MUST be 'accepted' (no leak)
preference_sample.ts chosen != rejected, source_run_ids must differ
evidence_record.test.ts 10 tests, JSON-fixture round-trip
schemas.test.ts 30 tests, inline fixtures
realdata.test.ts 8 tests, real-JSONL probe
Real-data validation probe (one of the 3 notables from recon):
46 rows across 7 sources, 100% pass. distilled_facts/procedures alive.
Report at data/_kb/realdata_validation_report.md (also written by the
test). Confirms schema fits existing producers without migration.
Phase 2 scaffold — scripts/distillation/transforms.ts
Promoted PROBES from realdata.test.ts into a real TRANSFORMS array
covering 12 source streams (8 Tier 1 validated + 4 Tier 2 from
recon's untested-streams list). Pure functions: no I/O, no model
calls, no clock reads. Caller supplies recorded_at + sig_hash so
materializer is deterministic by construction.
Spec non-negotiables enforced at schema layer (defense in depth):
- provenance{source_file, sig_hash, recorded_at} required everywhere
- schema_version mismatch hard-rejects (forward-compat gate)
- SFT no-leak: validateSftSample REJECTS partially_accepted, rejected,
needs_human_review — three explicit tests
- Every score has WHY (reasons non-empty)
- Every playbook traces to source (source_run_ids non-empty)
- Every preference has WHY (reason non-empty)
- Receipts substantive (git_sha 40-char, sha256 64-char, validation_pass:bool)
Branch carries uncommitted auditor rebuild work (mode.rs + modes.toml
+ inference.ts + static.ts) blocked on upstream Ollama Cloud kimi-k2
500 ISE; held pending recon-driven design decisions.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
57 lines
2.4 KiB
TypeScript
// ModelLedgerEntry — aggregate per-task-type-per-model performance.
// Built by aggregating mode_experiments.jsonl + model_trust.jsonl.
// Updated rather than appended — one row per (model_name, task_type)
// representing latest aggregates.
import {
|
|
ValidationResult, requireString, requireNumber, requireIsoTimestamp, requireStringArray,
|
|
} from "./types";
|
|
|
|
// Pinned schema version; validateModelLedgerEntry hard-rejects any mismatch
// (forward-compat gate). Bump on breaking shape changes.
export const MODEL_LEDGER_SCHEMA_VERSION = 1;
|
// One aggregate row per (model_name, task_type); rows are replaced in place
// rather than appended, so each entry holds the latest aggregates only.
export interface ModelLedgerEntry {
  schema_version: number;       // must equal MODEL_LEDGER_SCHEMA_VERSION; mismatches are hard-rejected
  model_name: string;
  model_provider: string;
  task_type: string;
  success_rate: number;         // [0, 1]
  failure_modes: string[];      // top failure mode tags
  best_partner_model?: string;  // pairs well with X (consensus / tie-break)
  escalation_role?: string;     // when this model gets escalated TO (or FROM)
  cost_usd_p50?: number;
  latency_ms_p50?: number;
  latency_ms_p95?: number;
  context_window?: number;
  sample_count: number;         // positive integer — no aggregate from zero samples
  last_updated: string;         // ISO 8601
  notes?: string;
}
|
export function validateModelLedgerEntry(input: unknown): ValidationResult<ModelLedgerEntry> {
|
|
const errors: string[] = [];
|
|
if (typeof input !== "object" || input === null) return { valid: false, errors: ["expected object"] };
|
|
const r = input as Record<string, unknown>;
|
|
let ok = true;
|
|
|
|
if (r.schema_version !== MODEL_LEDGER_SCHEMA_VERSION) {
|
|
errors.push(`schema_version: expected ${MODEL_LEDGER_SCHEMA_VERSION}, got ${JSON.stringify(r.schema_version)}`);
|
|
ok = false;
|
|
}
|
|
ok = requireString(r.model_name, "model_name", errors) && ok;
|
|
ok = requireString(r.model_provider, "model_provider", errors) && ok;
|
|
ok = requireString(r.task_type, "task_type", errors) && ok;
|
|
ok = requireIsoTimestamp(r.last_updated, "last_updated", errors) && ok;
|
|
ok = requireStringArray(r.failure_modes, "failure_modes", errors) && ok;
|
|
|
|
if (!requireNumber(r.success_rate, "success_rate", errors)) ok = false;
|
|
else if ((r.success_rate as number) < 0 || (r.success_rate as number) > 1) {
|
|
errors.push("success_rate: must be in [0, 1]"); ok = false;
|
|
}
|
|
if (!requireNumber(r.sample_count, "sample_count", errors)) ok = false;
|
|
else if ((r.sample_count as number) < 1 || !Number.isInteger(r.sample_count)) {
|
|
errors.push("sample_count: must be positive integer (no aggregate from zero samples)"); ok = false;
|
|
}
|
|
|
|
if (!ok) return { valid: false, errors };
|
|
return { valid: true, value: r as unknown as ModelLedgerEntry };
|
|
}
|