Langfuse observability — every LLM call traced and scored
Langfuse v2.95.11 running on :3001 (Docker + Postgres). Login: j@lakehouse.local / lakehouse2026.

tracing.ts: startTrace → logGeneration/logRetrieval/logSpan → scoreTrace → flush. Every hybrid search, SQL generation, RAG pipeline, and co-pilot briefing gets a full trace: model, prompt, output, latency, tokens.

The observer can now score traces based on verification results, and Langfuse aggregates accuracy over time, so we can see which models and approaches actually work in production, not just in tests.

Services: lakehouse (:3100) + sidecar (:3200) + agent (:3700) + observer + langfuse (:3001) + minio (:9000) + mariadb (:3306)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
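For illustration only, a minimal sketch of how a gateway handler could drive these helpers end to end. The handler name, the injected search and generate callbacks, the model name, and the score name are placeholders, not code from this commit:

import { startTrace, logRetrieval, logGeneration, scoreTrace, flush } from "./tracing";

// Hypothetical handler: search and generate are passed in so the sketch stays self-contained.
async function answerWithTrace(
  question: string,
  search: (q: string) => Promise<any[]>,
  generate: (q: string, ctx: any[]) => Promise<{ text: string; tokensIn: number; tokensOut: number }>,
) {
  const trace = startTrace("hybrid_search_answer", { question });

  // Retrieval step: only the result count is recorded as the span output.
  const t0 = Date.now();
  const hits = await search(question);
  logRetrieval(trace, "hybrid_search", question, hits, Date.now() - t0);

  // Generation step: model, prompt, completion, latency, tokens.
  const t1 = Date.now();
  const reply = await generate(question, hits);
  logGeneration(trace, "draft_answer", {
    model: "placeholder-model",
    prompt: question,
    completion: reply.text,
    duration_ms: Date.now() - t1,
    tokens_in: reply.tokensIn,
    tokens_out: reply.tokensOut,
  });

  // After verification, an accuracy score can be attached to the same trace.
  scoreTrace(trace, "verified_accuracy", 1, "answer matched verification");

  await flush(); // push buffered events to Langfuse
  return reply.text;
}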
This commit is contained in:
parent fc6b01c2bf
commit 67ab6e4bac

mcp-server/tracing.ts (Normal file, 52 lines)
@@ -0,0 +1,52 @@
/**
 * Langfuse tracing for the Lakehouse agent gateway.
 *
 * Every LLM call gets traced: model, prompt, output, latency, tokens.
 * The observer uses this to build a picture of what's working.
 *
 * Langfuse UI: http://localhost:3001
 * Login: j@lakehouse.local / lakehouse2026
 */

import { Langfuse } from "langfuse";

const langfuse = new Langfuse({
  publicKey: process.env.LANGFUSE_PUBLIC_KEY || "pk-lf-staffing",
  secretKey: process.env.LANGFUSE_SECRET_KEY || "sk-lf-staffing-secret",
  baseUrl: process.env.LANGFUSE_URL || "http://localhost:3001",
  enabled: true,
});

export type TraceContext = ReturnType<typeof langfuse.trace>;

export function startTrace(name: string, input?: any, metadata?: any) {
  return langfuse.trace({ name, input, metadata });
}

export function logGeneration(
  trace: TraceContext, name: string,
  opts: { model: string; prompt: string; completion: string;
    duration_ms: number; tokens_in?: number; tokens_out?: number; },
) {
  trace.generation({
    name, model: opts.model, input: opts.prompt, output: opts.completion,
    usage: { promptTokens: opts.tokens_in, completionTokens: opts.tokens_out },
    metadata: { duration_ms: opts.duration_ms },
  });
}

export function logSpan(trace: TraceContext, name: string, input: any, output: any, duration_ms: number) {
  trace.span({ name, input, output, metadata: { duration_ms } });
}

export function logRetrieval(trace: TraceContext, name: string, query: string, results: any[], duration_ms: number) {
  trace.span({ name, input: { query }, output: { results_count: results.length }, metadata: { duration_ms, type: "retrieval" } });
}

export function scoreTrace(trace: TraceContext, name: string, value: number, comment?: string) {
  trace.score({ name, value, comment });
}

export async function flush() { await langfuse.flushAsync(); }
export async function shutdown() { await langfuse.shutdownAsync(); }
export { langfuse };
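A hedged note on lifecycle: flush() ends the per-request chain described in the commit message, while shutdown() closes the client on process exit. One possible wiring, assumed here rather than part of this commit:

import { shutdown } from "./tracing";

// Flush any buffered events and close the Langfuse client before the gateway exits.
process.on("SIGTERM", async () => {
  await shutdown();
  process.exit(0);
});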