/**
 * Langfuse tracing for the Lakehouse agent gateway.
 *
 * Every LLM call gets traced: model, prompt, output, latency, tokens.
 * The observer uses this to build a picture of what's working.
 *
 * Langfuse UI: http://localhost:3001
 * Login: j@lakehouse.local / lakehouse2026
 */
import { Langfuse } from "langfuse";

const langfuse = new Langfuse({
  publicKey: process.env.LANGFUSE_PUBLIC_KEY || "pk-lf-staffing",
  secretKey: process.env.LANGFUSE_SECRET_KEY || "sk-lf-staffing-secret",
  baseUrl: process.env.LANGFUSE_URL || "http://localhost:3001",
  enabled: true,
});

/** Handle returned by startTrace; passed to the log* helpers below. */
export type TraceContext = ReturnType<typeof langfuse.trace>;

/** Open a new root trace for one gateway request. */
export function startTrace(name: string, input?: any, metadata?: any) {
  return langfuse.trace({ name, input, metadata });
}

/** Record a single LLM generation: model, prompt, completion, token usage, latency. */
export function logGeneration(
  trace: TraceContext,
  name: string,
  opts: {
    model: string;
    prompt: string;
    completion: string;
    duration_ms: number;
    tokens_in?: number;
    tokens_out?: number;
  },
) {
  trace.generation({
    name,
    model: opts.model,
    input: opts.prompt,
    output: opts.completion,
    usage: { promptTokens: opts.tokens_in, completionTokens: opts.tokens_out },
    metadata: { duration_ms: opts.duration_ms },
  });
}

/** Record an arbitrary unit of work inside a trace. */
export function logSpan(
  trace: TraceContext,
  name: string,
  input: any,
  output: any,
  duration_ms: number,
) {
  trace.span({ name, input, output, metadata: { duration_ms } });
}

/** Record a retrieval step; only the result count is stored, not the documents. */
export function logRetrieval(
  trace: TraceContext,
  name: string,
  query: string,
  results: any[],
  duration_ms: number,
) {
  trace.span({
    name,
    input: { query },
    output: { results_count: results.length },
    metadata: { duration_ms, type: "retrieval" },
  });
}

/** Attach a numeric score (e.g. answer quality) to a trace. */
export function scoreTrace(trace: TraceContext, name: string, value: number, comment?: string) {
  trace.score({ name, value, comment });
}

/** Push buffered events to the Langfuse server without closing the client. */
export async function flush() {
  await langfuse.flushAsync();
}

/** Flush remaining events and close the client; call on process exit. */
export async function shutdown() {
  await langfuse.shutdownAsync();
}

export { langfuse };
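
/*
 * Sketch: how a gateway request handler might wire these helpers together.
 * This is an illustrative assumption, not part of the module's API surface:
 * the handler name, the model id, the score name, and callModel() are all
 * hypothetical; only the exported helpers above are real.
 *
 *   async function handleChat(userMessage: string) {
 *     // One root trace per incoming request.
 *     const trace = startTrace("chat-request", { userMessage }, { route: "/chat" });
 *
 *     const t0 = Date.now();
 *     const completion = await callModel(userMessage); // hypothetical LLM call
 *
 *     // Attach the LLM call to the trace with latency and (optional) token counts.
 *     logGeneration(trace, "chat-completion", {
 *       model: "gpt-4o-mini",
 *       prompt: userMessage,
 *       completion,
 *       duration_ms: Date.now() - t0,
 *     });
 *
 *     // Mark the trace so the observer can filter successful requests.
 *     scoreTrace(trace, "answered", 1);
 *
 *     // Events are buffered client-side; flush so short-lived processes don't drop them.
 *     await flush();
 *     return completion;
 *   }
 */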