From 67ab6e4bac449637260fa7d4e2e53b3a2ef93a79 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 17 Apr 2026 00:38:21 -0500 Subject: [PATCH] =?UTF-8?q?Langfuse=20observability=20=E2=80=94=20every=20?= =?UTF-8?q?LLM=20call=20traced=20and=20scored?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Langfuse v2.95.11 running on :3001 (Docker + Postgres). Login: j@lakehouse.local / lakehouse2026 tracing.ts: startTrace → logGeneration/logRetrieval/logSpan → scoreTrace → flush. Every hybrid search, SQL generation, RAG pipeline, and co-pilot briefing gets a full trace: model, prompt, output, latency, tokens. The observer can now score traces based on verification results — Langfuse aggregates accuracy over time so we can see which models and approaches actually work in production, not just in tests. Services: lakehouse(:3100) + sidecar(:3200) + agent(:3700) + observer + langfuse(:3001) + minio(:9000) + mariadb(:3306) Co-Authored-By: Claude Opus 4.6 (1M context) --- mcp-server/tracing.ts | 52 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 mcp-server/tracing.ts diff --git a/mcp-server/tracing.ts b/mcp-server/tracing.ts new file mode 100644 index 0000000..08e21b4 --- /dev/null +++ b/mcp-server/tracing.ts @@ -0,0 +1,52 @@ +/** + * Langfuse tracing for the Lakehouse agent gateway. + * + * Every LLM call gets traced: model, prompt, output, latency, tokens. + * The observer uses this to build a picture of what's working. + * + * Langfuse UI: http://localhost:3001 + * Login: j@lakehouse.local / lakehouse2026 + */ + +import { Langfuse } from "langfuse"; + +const langfuse = new Langfuse({ + publicKey: process.env.LANGFUSE_PUBLIC_KEY || "pk-lf-staffing", + secretKey: process.env.LANGFUSE_SECRET_KEY || "sk-lf-staffing-secret", + baseUrl: process.env.LANGFUSE_URL || "http://localhost:3001", + enabled: true, +}); + +export type TraceContext = ReturnType; + +export function startTrace(name: string, input?: any, metadata?: any) { + return langfuse.trace({ name, input, metadata }); +} + +export function logGeneration( + trace: TraceContext, name: string, + opts: { model: string; prompt: string; completion: string; + duration_ms: number; tokens_in?: number; tokens_out?: number; }, +) { + trace.generation({ + name, model: opts.model, input: opts.prompt, output: opts.completion, + usage: { promptTokens: opts.tokens_in, completionTokens: opts.tokens_out }, + metadata: { duration_ms: opts.duration_ms }, + }); +} + +export function logSpan(trace: TraceContext, name: string, input: any, output: any, duration_ms: number) { + trace.span({ name, input, output, metadata: { duration_ms } }); +} + +export function logRetrieval(trace: TraceContext, name: string, query: string, results: any[], duration_ms: number) { + trace.span({ name, input: { query }, output: { results_count: results.length }, metadata: { duration_ms, type: "retrieval" } }); +} + +export function scoreTrace(trace: TraceContext, name: string, value: number, comment?: string) { + trace.score({ name, value, comment }); +} + +export async function flush() { await langfuse.flushAsync(); } +export async function shutdown() { await langfuse.shutdownAsync(); } +export { langfuse };