LAKEHOUSE/scripts/cutover/gen_real_queries.go
root 95f155b017 real_006: distribution-shift test on rows 10-59 of fill_events
Methodology fix: gen_real_queries.go gains -offset N flag. Every prior
real_NNN test sourced queries from rows 0-9 of fill_events.parquet
(default -limit 10), so the substrate's published "8/10 cold-pass top-1
= judge-best" was measured on a memorized slice, not held-out data.

real_006 samples 50 fresh rows (offset 10, never seen by the workers
or ethereal_workers corpora). Same harness, same local qwen2.5:latest
judge, same K=10. ~14 min wall total. Local-only, no cloud calls.

Headline findings:

- Cold-pass top-1 = judge-best (rank match): 41/50 (82%) vs real_001's
  8/10 (80%) — substrate generalizes at rank level.
- Strict (rating ≥ 2): 34/50 (68%) — a 12-point drop from real_001's
  80%. ~7 of the 41 "no-discovery" queries had a cold top-1 that the
  judge rated 1; the corpus has gaps for some role-city combos in the
  v3 slice.
- Verbatim lift: 9/9 discoveries → warm top-1 (clean, matches real_001 2/2)
- Paraphrase recovery: 6/9 → top-1, 9/9 any-rank
- Quality regressed: 3/50 — Q43 is the structural one

Q43 (Packer at Midway Distribution / Chicago IL) regressed from
rating 5 to rating 2 on warm pass with `warm_boosted_count=0` and
`playbook_recorded=false`. Q18 (Shipping Clerks at the same client+city)
recorded a playbook entry. The regression suggests Q18's recording
leaked into Q43 via the warm-pass playbook corpus retrieval surface
even though the role gate from real_002 should have blocked it.
Three possible paths: extractor failed on one query, gate fires on
boost path but not Shape B inject, or cosine drift puts the recorded
worker close enough to Q43's embedding that warm-pass retrieval picks
it up directly. Diagnosis is the next move.

Three same-(client, city) clusters tested:
- Heritage Foods Gary IN × 3 distinct roles: clean, distinct workers
- Riverfront Steel Columbus OH × 4: cosine-level confusion (Q9/Q25
  surface same worker w-281 for Assemblers vs Quality Techs at cold-
  pass), but no playbook bleed
- Midway Distribution Chicago IL × 3: Q43 regression as above

What this confirms: substrate works on the fresh distribution at the
rank level, verbatim lift is real, paraphrase recovery is real.

What this falsifies: real_002's role-gate fix is not structurally
airtight. The bleed pattern can still fire under conditions the
prior tests didn't reach.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-05 04:54:03 -05:00

175 lines
5.6 KiB
Go

// gen_real_queries — pull N rows from fill_events.parquet and translate
// each into a coordinator-style natural-language query.
//
// Output: one query per line, written to stdout (intended for redirect
// into tests/reality/real_coord_queries.txt and then fed to
// scripts/playbook_lift.sh as --queries=<path>).
//
// Why: the lift harness's standard query corpus is hand-crafted to
// stress multi-constraint matching. Real coordinator demand has a
// different distribution — single-role, single-geo, count + time —
// and we want to probe whether the substrate handles that shape too.
// The fill_events parquet on the Rust side is the closest thing to
// "real demand" we have on disk (123 rows, sourced from staffing
// fixture generation but shaped like genuine fills).
package main
import (
"context"
"flag"
"fmt"
"log"
"strings"
"github.com/apache/arrow-go/v18/arrow/memory"
"github.com/apache/arrow-go/v18/parquet/file"
"github.com/apache/arrow-go/v18/parquet/pqarrow"
)
func main() {
	srcPath := flag.String("src", "/home/profit/lakehouse/data/datasets/fill_events.parquet", "fill_events parquet path")
	rowLimit := flag.Int("limit", 10, "number of source rows to read")
	rowOffset := flag.Int("offset", 0, "skip the first N rows (lets reality tests sample beyond the memorized real_001 slice)")
	styleCSV := flag.String("styles", "need", "comma-separated styles to emit per row (need|client_first|looking|shorthand|all)")
	flag.Parse()

	// Open the parquet file and materialize the whole table up front; at
	// 123 rows this is trivially small, so no streaming is needed.
	pf, err := file.OpenParquetFile(*srcPath, false)
	if err != nil {
		log.Fatalf("open %s: %v", *srcPath, err)
	}
	defer pf.Close()

	reader, err := pqarrow.NewFileReader(pf, pqarrow.ArrowReadProperties{}, memory.DefaultAllocator)
	if err != nil {
		log.Fatalf("pqarrow reader: %v", err)
	}
	table, err := reader.ReadTable(context.Background())
	if err != nil {
		log.Fatalf("read table: %v", err)
	}
	defer table.Release()

	// Field order must match parquet schema (see scripts/cutover dev probe):
	// 3=client, 5=city, 6=state, 7=role, 8=count, 10=at, 12=deadline.
	// NOTE(review): Chunk(0) assumes each column arrives as a single chunk —
	// holds for this small file, but would mis-index a multi-chunk table.
	client := table.Column(3).Data().Chunk(0)
	city := table.Column(5).Data().Chunk(0)
	state := table.Column(6).Data().Chunk(0)
	role := table.Column(7).Data().Chunk(0)
	count := table.Column(8).Data().Chunk(0)
	at := table.Column(10).Data().Chunk(0)
	deadline := table.Column(12).Data().Chunk(0)

	// Clamp [first,last) to the table's row range so any -offset/-limit
	// combination degenerates to "emit nothing" rather than panicking.
	total := int(table.NumRows())
	first := *rowOffset
	switch {
	case first < 0:
		first = 0
	case first > total:
		first = total
	}
	last := first + *rowLimit
	if last > total {
		last = total
	}

	styles := parseStyles(*styleCSV)

	// Commented header so the emitted file is self-describing when checked in.
	fmt.Println("# Real-shape coordinator queries — generated from fill_events.parquet")
	fmt.Println("# (real-shape demand data; queries built mechanically from event rows).")
	fmt.Printf("# Source: %s (%d rows total, rows [%d,%d) emitted, styles=%v)\n", *srcPath, total, first, last, styles)
	fmt.Println("#")
	fmt.Println("# Styles:")
	fmt.Println("# need: 'Need N {role}{s} in {city} {state} starting at {at} for {client}'")
	fmt.Println("# — matches scripts/playbook_lift's extractRoleFromNeed regex")
	fmt.Println("# client_first: '{client} needs N {role}{s} in {city} {state} at {at}'")
	fmt.Println("# looking: 'Looking for N {role}{s} at {client} in {city} {state} for {at} shift'")
	fmt.Println("# shorthand: 'N {role}{s} {city} {state} {at} {client}'")
	fmt.Println("#")
	fmt.Println("# Only 'need' currently extracts a role. The other three test the")
	fmt.Println("# substrate's bleed behavior when the role gate is silently disabled.")
	fmt.Println()

	// One query line per (row, style) pair, rows in source order.
	for row := first; row < last; row++ {
		ev := event{
			client: client.ValueStr(row),
			city:   city.ValueStr(row),
			state:  state.ValueStr(row),
			role:   role.ValueStr(row),
			count:  count.ValueStr(row),
			at:     at.ValueStr(row),
		}
		// "(null)" is the stringified null sentinel from ValueStr; treat it
		// the same as an empty cell.
		if dl := deadline.ValueStr(row); dl != "" && dl != "(null)" {
			ev.deadline = dl
		}
		for _, style := range styles {
			fmt.Println(formatQuery(ev, style))
		}
	}
}
// event holds the per-row fields pulled from fill_events.parquet that the
// query templates interpolate. Everything is kept as a string because the
// values are only ever formatted back into text; deadline is optional and
// stays "" when the source cell is empty or "(null)".
type event struct {
client, city, state, role, count, at, deadline string
}
// formatQuery renders one fill_events row as a natural-language coordinator
// query in the requested style. Any unrecognized style falls through to the
// "need" form, which is also the only shape that carries the optional
// deadline suffix.
func formatQuery(e event, style string) string {
	roleText := pluralize(e.role, e.count)
	switch style {
	case "client_first":
		// No "Need ... in" anchor — extractRoleFromNeed returns "" on this.
		return fmt.Sprintf("%s needs %s %s in %s %s at %s", e.client, e.count, roleText, e.city, e.state, e.at)
	case "looking":
		return fmt.Sprintf("Looking for %s %s at %s in %s %s for %s shift", e.count, roleText, e.client, e.city, e.state, e.at)
	case "shorthand":
		return fmt.Sprintf("%s %s %s %s %s %s", e.count, roleText, e.city, e.state, e.at, e.client)
	}
	// "need" form — the original real_001 shape, regex-extractor wins.
	out := fmt.Sprintf("Need %s %s in %s %s starting at %s for %s", e.count, roleText, e.city, e.state, e.at, e.client)
	if e.deadline != "" {
		out += ", deadline " + e.deadline
	}
	return out
}
// parseStyles unpacks the comma-separated -styles flag. "all" — whether it
// is the entire flag value or one token among several — expands to every
// supported style (the flag's usage string documents "all" as an ordinary
// list token, which the previous version rejected when it appeared inside a
// list). Unknown tokens are dropped with a log line so callers know, and an
// empty result falls back to the default "need" style. Duplicate known
// tokens are preserved, matching the original behavior.
func parseStyles(csv string) []string {
	all := []string{"need", "client_first", "looking", "shorthand"}
	out := []string{}
	for _, tok := range strings.Split(csv, ",") {
		tok = strings.TrimSpace(tok)
		if tok == "" {
			continue
		}
		switch tok {
		case "all":
			// "all" anywhere in the list wins outright.
			return all
		case "need", "client_first", "looking", "shorthand":
			out = append(out, tok)
		default:
			log.Printf("gen_real_queries: unknown style %q — skipping", tok)
		}
	}
	if len(out) == 0 {
		return []string{"need"}
	}
	return out
}
// pluralize returns role with an "s" suffix unless count is exactly the
// string "1" ("Warehouse Associate" → "Warehouse Associates"; "Loader" →
// "Loaders"). Naive but fits the staffing-domain vocabulary in fill_events.
func pluralize(role, count string) string {
	if count != "1" {
		return role + "s"
	}
	return role
}