root 89ca72d471 materializer + replay ports + vectord substrate fix verified at scale
Two threads landing together — the doc edits interleave so they ship
in a single commit.

1. **vectord substrate fix verified at original scale** (closes the
   2026-05-01 thread). Re-ran multitier 5min @ conc=50: 132,211
   scenarios at 438/sec, 6/6 classes at 0% failure (was 4/6 pre-fix).
   Throughput dropped 1,115 → 438/sec because previously-broken
   scenarios now do real HNSW Add work — honest cost of correctness.
   The fix (i.vectors side-store + safeGraphAdd recover wrappers +
   smallIndexRebuildThreshold=32 + saveTask coalescing) holds at the
   footprint that originally surfaced the bug.

2. **Materializer port** — internal/materializer + cmd/materializer +
   scripts/materializer_smoke.sh. Ports scripts/distillation/transforms.ts
   (12 transforms) + build_evidence_index.ts (idempotency, day-partition,
   receipt). On-wire JSON shape matches TS so Bun and Go runs are
   interchangeable. 14 tests green.

3. **Replay port** — internal/replay + cmd/replay +
   scripts/replay_smoke.sh. Ports scripts/distillation/replay.ts
   (retrieve → bundle → /v1/chat → validate → log). Closes audit-FULL
   phase 7 live invocation on the Go side. Both runtimes append to the
   same data/_kb/replay_runs.jsonl (schema=replay_run.v1). 14 tests green.

Side effect on internal/distillation/types.go: EvidenceRecord gained
prompt_tokens, completion_tokens, and metadata fields to mirror the TS
shape the materializer transforms produce.

STATE_OF_PLAY refreshed to 2026-05-02; ARCHITECTURE_COMPARISON decisions
tracker moves the materializer + replay items from _open_ to DONE and
adds the substrate-fix scale verification row.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-02 03:31:02 -05:00

607 lines
20 KiB
Go

package vectord
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math"
"strings"
"sync"
"testing"
"github.com/coder/hnsw"
)
// TestNewIndex_DefaultsAndValidation checks that NewIndex fills in the
// package defaults (M, EfSearch, cosine distance) and rejects params
// with a missing name, a zero dimension, or an unknown distance.
func TestNewIndex_DefaultsAndValidation(t *testing.T) {
	idx, err := NewIndex(IndexParams{Name: "x", Dimension: 4})
	if err != nil {
		t.Fatal(err)
	}
	got := idx.Params()
	defaultsApplied := got.M == DefaultM && got.EfSearch == DefaultEfSearch && got.Distance == DistanceCosine
	if !defaultsApplied {
		t.Errorf("defaults not applied: %+v", got)
	}
	if _, err := NewIndex(IndexParams{Dimension: 4}); err == nil {
		t.Error("expected error on empty name")
	}
	if _, err := NewIndex(IndexParams{Name: "y", Dimension: 0}); err == nil {
		t.Error("expected error on zero dimension")
	}
	if _, err := NewIndex(IndexParams{Name: "z", Dimension: 4, Distance: "bogus"}); !errors.Is(err, ErrUnknownDistance) {
		t.Errorf("expected ErrUnknownDistance, got %v", err)
	}
}
// TestIndex_AddAndSearch_Recall inserts three orthogonal vectors and
// verifies the exact-match query comes back first with ~0 cosine
// distance and its metadata intact.
func TestIndex_AddAndSearch_Recall(t *testing.T) {
	idx, err := NewIndex(IndexParams{Name: "x", Dimension: 3, Distance: DistanceCosine})
	if err != nil {
		t.Fatal(err)
	}
	query := []float32{1, 0, 0}
	inserts := []struct {
		id   string
		vec  []float32
		meta json.RawMessage
	}{
		{"alice", query, json.RawMessage(`{"role":"warehouse"}`)},
		{"bob", []float32{0, 1, 0}, nil},
		{"carol", []float32{0, 0, 1}, nil},
	}
	for _, in := range inserts {
		if err := idx.Add(in.id, in.vec, in.meta); err != nil {
			t.Fatal(err)
		}
	}
	if n := idx.Len(); n != 3 {
		t.Errorf("Len: got %d, want 3", n)
	}
	hits, err := idx.Search(query, 2)
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) == 0 {
		t.Fatal("no hits")
	}
	top := hits[0]
	if top.ID != "alice" {
		t.Errorf("nearest: got %q, want alice", top.ID)
	}
	// Cosine distance of identical unit vectors is 0; metadata round-trips.
	if top.Distance > 0.001 {
		t.Errorf("nearest distance: got %v, want ~0", top.Distance)
	}
	if string(top.Metadata) != `{"role":"warehouse"}` {
		t.Errorf("metadata: got %q", top.Metadata)
	}
}
// TestIndex_DimensionMismatch ensures both Add and Search reject a
// vector whose length differs from the index dimension with
// ErrDimensionMismatch.
func TestIndex_DimensionMismatch(t *testing.T) {
	idx, err := NewIndex(IndexParams{Name: "x", Dimension: 4})
	if err != nil {
		// Previously ignored; a constructor failure would nil-deref below
		// instead of reporting the real cause.
		t.Fatalf("NewIndex: %v", err)
	}
	if err := idx.Add("a", []float32{1, 2, 3}, nil); !errors.Is(err, ErrDimensionMismatch) {
		t.Errorf("Add: expected ErrDimensionMismatch, got %v", err)
	}
	if _, err := idx.Search([]float32{1, 2, 3}, 1); !errors.Is(err, ErrDimensionMismatch) {
		t.Errorf("Search: expected ErrDimensionMismatch, got %v", err)
	}
}
// TestIndex_DeleteAndLookup verifies Delete reports whether the key
// existed and that Lookup no longer finds a deleted key.
func TestIndex_DeleteAndLookup(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "x", Dimension: 2})
	_ = idx.Add("a", []float32{1, 0}, nil)
	if ok := idx.Delete("a"); !ok {
		t.Error("Delete returned false on existing key")
	}
	if _, _, found := idx.Lookup("a"); found {
		t.Error("Lookup found deleted key")
	}
	if again := idx.Delete("a"); again {
		t.Error("Delete should return false on missing key")
	}
}
// TestIndex_ConcurrentSearchAdd exercises the RWMutex: one writer
// adding distinct keys while eight concurrent readers search must not
// deadlock, panic, or interleave incorrectly. The writer gets its own
// key namespace so this doesn't stress the library's re-add path
// (which has known issues under high churn — the wrapper exposes
// idempotent semantics via single-threaded Delete+Add but isn't a
// fix-everything for upstream).
func TestIndex_ConcurrentSearchAdd(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "x", Dimension: 4})
	for i := 0; i < 50; i++ {
		_ = idx.Add(fmt.Sprintf("seed-%d", i), []float32{float32(i), 0, 0, 0}, nil)
	}
	var wg sync.WaitGroup
	// One writer goroutine, eight readers — realistic ratio for the
	// staffing co-pilot use case where ingestion is occasional and
	// queries are common.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for j := 0; j < 50; j++ {
			_ = idx.Add(fmt.Sprintf("hot-%d", j), []float32{float32(j), 1, 0, 0}, nil)
		}
	}()
	for reader := 0; reader < 8; reader++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for q := 0; q < 50; q++ {
				_, _ = idx.Search([]float32{float32(q), 0, 0, 0}, 5)
			}
		}()
	}
	wg.Wait()
}
// TestEncodeDecode_RoundTrip verifies that Encode→DecodeIndex preserves
// cardinality, params, per-item top-1 recall, and metadata.
func TestEncodeDecode_RoundTrip(t *testing.T) {
	const dim = 16
	src, _ := NewIndex(IndexParams{Name: "x", Dimension: dim, Distance: DistanceCosine})
	// Each vector is a unique unit vector along axis (i mod dim) with a
	// tiny perturbation on a different axis — recall=1 is robust without
	// colliding under cosine.
	vecFor := func(i int) []float32 {
		v := make([]float32, dim)
		v[i%dim] = 1.0
		v[(i+1)%dim] = 0.001
		return v
	}
	for i := 0; i < dim; i++ {
		meta := json.RawMessage(fmt.Sprintf(`{"row":%d}`, i))
		if err := src.Add(fmt.Sprintf("id-%02d", i), vecFor(i), meta); err != nil {
			t.Fatal(err)
		}
	}
	var envBuf, graphBuf bytes.Buffer
	if err := src.Encode(&envBuf, &graphBuf); err != nil {
		t.Fatalf("Encode: %v", err)
	}
	dst, err := DecodeIndex(&envBuf, &graphBuf)
	if err != nil {
		t.Fatalf("DecodeIndex: %v", err)
	}
	if src.Len() != dst.Len() {
		t.Errorf("Len: src=%d dst=%d", src.Len(), dst.Len())
	}
	if src.Params() != dst.Params() {
		t.Errorf("Params: src=%+v dst=%+v", src.Params(), dst.Params())
	}
	for i := 0; i < dim; i++ {
		hits, err := dst.Search(vecFor(i), 1)
		if err != nil {
			t.Fatal(err)
		}
		wantID := fmt.Sprintf("id-%02d", i)
		if len(hits) == 0 || hits[0].ID != wantID {
			t.Errorf("Search after decode: id-%d → %v, want %s", i, hits, wantID)
			continue
		}
		if wantMeta := fmt.Sprintf(`{"row":%d}`, i); string(hits[0].Metadata) != wantMeta {
			t.Errorf("metadata after decode: got %q, want %q", hits[0].Metadata, wantMeta)
		}
	}
}
// TestDecodeIndex_VersionMismatch confirms an envelope carrying an
// unsupported version number is rejected with ErrVersionMismatch.
func TestDecodeIndex_VersionMismatch(t *testing.T) {
	env := bytes.NewBufferString(`{"version":999,"params":{"name":"x","dimension":4}}`)
	if _, err := DecodeIndex(env, bytes.NewReader(nil)); !errors.Is(err, ErrVersionMismatch) {
		t.Errorf("expected ErrVersionMismatch, got %v", err)
	}
}
// TestEncodeDecode_NilMetaItemsSurviveRoundTrip locks the
// post_role_gate_v1 scrum convergent finding (Opus + Kimi): items added
// with nil metadata MUST survive Encode→Decode and remain visible to
// IDs(). The pre-fix v1 envelope inferred ids from meta keys, silently
// dropping nil-meta items; the v2 envelope carries the IDs slice
// explicitly. Worst case exercised here: every item has nil metadata —
// pre-fix this would yield IDs() == [].
func TestEncodeDecode_NilMetaItemsSurviveRoundTrip(t *testing.T) {
	src, _ := NewIndex(IndexParams{Name: "nil_meta_test", Dimension: 4, Distance: DistanceCosine})
	for _, id := range []string{"a", "b", "c"} {
		// nil meta — the case Opus + Kimi flagged.
		if err := src.Add(id, []float32{1, 0, 0, 0}, nil); err != nil {
			t.Fatalf("Add %s: %v", id, err)
		}
	}
	if got := src.IDs(); len(got) != 3 {
		t.Fatalf("pre-encode: expected 3 IDs, got %d", len(got))
	}
	var envBuf, graphBuf bytes.Buffer
	if err := src.Encode(&envBuf, &graphBuf); err != nil {
		t.Fatalf("Encode: %v", err)
	}
	decoded, err := DecodeIndex(&envBuf, &graphBuf)
	if err != nil {
		t.Fatalf("DecodeIndex: %v", err)
	}
	if got := decoded.IDs(); len(got) != 3 {
		t.Errorf("post-decode: expected 3 IDs (nil-meta items must survive v2 round-trip), got %d %v", len(got), got)
	}
}
// TestDecodeIndex_V1BackwardCompat locks the legacy-shape fallback: an
// envelope without an explicit "ids" field is still loadable. The v1
// fallback infers ids from meta keys; the i.vectors architecture
// (added 2026-05-01 for the v0.6.1 panic fix) requires each id to also
// exist in the imported graph — items present only in meta but missing
// from the graph are unrecoverable post-decode. That tightens the v1
// contract: nil-meta items in v1 envelopes were already invisible to
// IDs(), and meta-only items with no graph entry were already broken
// (search would miss).
func TestDecodeIndex_V1BackwardCompat(t *testing.T) {
	// Build a v1 fixture with consistent meta + graph: id1 is in the
	// graph and has metadata. Export the graph, then hand-craft envelope
	// JSON without an "ids" field to trigger the v1 path.
	src, _ := NewIndex(IndexParams{Name: "v1_test", Dimension: 4})
	if err := src.Add("id1", []float32{1, 0, 0, 0}, json.RawMessage(`{"foo":"bar"}`)); err != nil {
		t.Fatal(err)
	}
	var graphBuf bytes.Buffer
	if err := src.g.Export(&graphBuf); err != nil {
		t.Fatalf("export graph for v1 fixture: %v", err)
	}
	envJSON := `{"version":1,"params":{"name":"v1_test","dimension":4,"distance":"cosine","m":16,"ef_search":20},"metadata":{"id1":{"foo":"bar"}}}`
	dst, err := DecodeIndex(strings.NewReader(envJSON), &graphBuf)
	if err != nil {
		t.Fatalf("v1 envelope must still load, got %v", err)
	}
	restored := false
	for _, id := range dst.IDs() {
		if id == "id1" {
			restored = true
			break
		}
	}
	if !restored {
		t.Errorf("v1 fallback didn't restore id1, got IDs=%v", dst.IDs())
	}
}
// TestRegistry_CreateGetDelete walks the registry lifecycle: Create,
// Get, duplicate-Create rejection, Delete, then Get/Delete on the now
// missing name.
func TestRegistry_CreateGetDelete(t *testing.T) {
	reg := NewRegistry()
	idx, err := reg.Create(IndexParams{Name: "workers", Dimension: 4})
	if err != nil {
		t.Fatal(err)
	}
	if name := idx.Params().Name; name != "workers" {
		t.Errorf("name: got %q", name)
	}
	fetched, err := reg.Get("workers")
	if err != nil || fetched != idx {
		t.Errorf("Get returned different / err: %v", err)
	}
	if _, err := reg.Create(IndexParams{Name: "workers", Dimension: 4}); !errors.Is(err, ErrIndexAlreadyExists) {
		t.Errorf("dup create: expected ErrIndexAlreadyExists, got %v", err)
	}
	if err := reg.Delete("workers"); err != nil {
		t.Fatal(err)
	}
	if _, err := reg.Get("workers"); !errors.Is(err, ErrIndexNotFound) {
		t.Errorf("Get after Delete: expected ErrIndexNotFound, got %v", err)
	}
	if err := reg.Delete("workers"); !errors.Is(err, ErrIndexNotFound) {
		t.Errorf("idempotent Delete: expected ErrIndexNotFound, got %v", err)
	}
}
// TestAdd_RejectsNonFinite ensures Add refuses vectors containing NaN
// or ±Inf components.
func TestAdd_RejectsNonFinite(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "x", Dimension: 3, Distance: DistanceEuclidean})
	for _, vec := range [][]float32{
		{float32(math.NaN()), 0, 0},
		{float32(math.Inf(1)), 0, 0},
		{0, float32(math.Inf(-1)), 0},
	} {
		if err := idx.Add("a", vec, nil); err == nil {
			t.Errorf("expected error for non-finite vec %v", vec)
		}
	}
}
// TestAdd_RejectsZeroNormUnderCosine: a zero-norm vector has no
// direction, so cosine indexes must reject it, while under euclidean
// the origin is a valid point and must be accepted.
func TestAdd_RejectsZeroNormUnderCosine(t *testing.T) {
	cosIdx, _ := NewIndex(IndexParams{Name: "x", Dimension: 3, Distance: DistanceCosine})
	if err := cosIdx.Add("a", []float32{0, 0, 0}, nil); err == nil {
		t.Error("expected error for zero-norm under cosine")
	}
	// Same vec is OK under euclidean (origin is a valid point).
	eucIdx, _ := NewIndex(IndexParams{Name: "y", Dimension: 3, Distance: DistanceEuclidean})
	if err := eucIdx.Add("a", []float32{0, 0, 0}, nil); err != nil {
		t.Errorf("zero vec under euclidean should be valid: %v", err)
	}
}
// TestAdd_PreservesMetaOnNilReAdd — per scrum K-B1: re-adding with nil
// meta must NOT clear existing metadata, while an explicit empty
// object replaces it.
func TestAdd_PreservesMetaOnNilReAdd(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "x", Dimension: 2})
	_ = idx.Add("alice", []float32{1, 0}, json.RawMessage(`{"role":"warehouse"}`))
	if err := idx.Add("alice", []float32{0.9, 0.1}, nil); err != nil {
		t.Fatal(err)
	}
	_, meta, found := idx.Lookup("alice")
	if !found {
		t.Fatal("Lookup not found")
	}
	if string(meta) != `{"role":"warehouse"}` {
		t.Errorf("metadata cleared on nil re-add: got %q", meta)
	}
	// Explicit empty {} replaces.
	_ = idx.Add("alice", []float32{1, 0}, json.RawMessage(`{}`))
	if _, meta, _ = idx.Lookup("alice"); string(meta) != `{}` {
		t.Errorf("explicit {} should replace: got %q", meta)
	}
}
// TestLookup_ReturnsCopy — per scrum O-W1: mutating the slice Lookup
// returns must not corrupt the vector stored in the index.
func TestLookup_ReturnsCopy(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "x", Dimension: 3})
	_ = idx.Add("a", []float32{1, 2, 3}, nil)
	first, _, _ := idx.Lookup("a")
	first[0] = 99 // mutate the returned copy
	second, _, _ := idx.Lookup("a")
	if second[0] != 1 {
		t.Errorf("Lookup didn't copy: index now sees %v", second)
	}
}
// TestIndex_IDs locks the snapshot semantics: IDs() returns a copy of
// the canonical id set that callers can iterate without holding the
// index lock. Underpins the merge endpoint (OPEN #1) — without IDs(),
// the merge handler can't enumerate items to drain.
func TestIndex_IDs(t *testing.T) {
	idx, err := NewIndex(IndexParams{Name: "ids_test", Dimension: 4})
	if err != nil {
		t.Fatalf("NewIndex: %v", err)
	}
	if got := idx.IDs(); len(got) != 0 {
		t.Errorf("empty index should have no IDs, got %v", got)
	}
	// Add with nil meta — the ids tracker is the canonical set, so
	// these MUST appear in IDs() even though they're not in i.meta.
	for _, id := range []string{"a", "b", "c"} {
		if err := idx.Add(id, []float32{1, 0, 0, 0}, nil); err != nil {
			t.Fatalf("Add %s: %v", id, err)
		}
	}
	snapshot := idx.IDs()
	if len(snapshot) != 3 {
		t.Errorf("expected 3 IDs after 3 Adds (nil meta still counts), got %d %v", len(snapshot), snapshot)
	}
	// Mutating the returned slice must not leak into later snapshots.
	snapshot[0] = "MUTATED"
	for _, id := range idx.IDs() {
		if id == "MUTATED" {
			t.Errorf("IDs() must return a snapshot independent of internal state")
		}
	}
	// Delete updates the tracker.
	idx.Delete("a")
	if got := idx.IDs(); len(got) != 2 {
		t.Errorf("expected 2 IDs after Delete, got %d %v", len(got), got)
	}
}
// TestAdd_SmallIndexNoPanic_Sequential locks the multitier_100k
// 2026-05-01 finding: sequential Adds with distinct IDs to a fresh
// small (playbook-corpus shape) index must not trigger the coder/hnsw
// v0.6.1 nil-deref. Pre-fix, growing 0→1→2 on certain vector
// geometries panicked in layerNode.search.
func TestAdd_SmallIndexNoPanic_Sequential(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "playbook_shape", Dimension: 8, Distance: DistanceCosine})
	total := smallIndexRebuildThreshold + 5
	for i := 0; i < total; i++ {
		// Same near-axis geometry that originally surfaced the bug.
		vec := make([]float32, 8)
		vec[i%8] = 1.0
		vec[(i+1)%8] = 0.01
		if err := idx.Add(fmt.Sprintf("e-%04d", i), vec, nil); err != nil {
			t.Fatalf("Add e-%04d at len=%d: %v", i, idx.Len(), err)
		}
	}
	if idx.Len() != total {
		t.Errorf("Len() = %d, want %d", idx.Len(), total)
	}
}
// TestBatchAdd_SmallIndexNoPanic locks the same small-index failure
// mode for the batch path — surge_fill_validate hit
// `/v1/matrix/playbooks/record`, which BatchAdds a single item per
// request.
func TestBatchAdd_SmallIndexNoPanic(t *testing.T) {
	idx, err := NewIndex(IndexParams{Name: "small_batch", Dimension: 4})
	if err != nil {
		t.Fatalf("NewIndex: %v", err)
	}
	for i := 0; i < smallIndexRebuildThreshold+3; i++ {
		v := []float32{float32(i + 1), 0.001, 0, 0}
		if err := idx.BatchAdd([]BatchItem{{ID: fmt.Sprintf("b-%03d", i), Vector: v}}); err != nil {
			t.Fatalf("BatchAdd b-%03d at len=%d: %v", i, idx.Len(), err)
		}
	}
	// Mirror the sequential variant: "no panic" alone is too weak —
	// every batched item must actually have landed in the index.
	if want := smallIndexRebuildThreshold + 3; idx.Len() != want {
		t.Errorf("Len() = %d, want %d", idx.Len(), want)
	}
}
// TestAdd_RebuildPreservesSearch — when rebuilds fire below the
// threshold, search must still recall correctly. The boundary is where
// it matters most: an index right at the threshold has just been
// rebuilt and the next Add transitions to incremental.
func TestAdd_RebuildPreservesSearch(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "rebuild_recall", Dimension: 4, Distance: DistanceCosine})
	vecFor := func(i int) []float32 {
		v := make([]float32, 4)
		v[i%4] = 1.0
		v[(i+1)%4] = 0.001 * float32(i+1)
		return v
	}
	const count = 10
	for i := 0; i < count; i++ {
		if err := idx.Add(fmt.Sprintf("id-%02d", i), vecFor(i), nil); err != nil {
			t.Fatalf("Add: %v", err)
		}
	}
	for i := 0; i < count; i++ {
		hits, err := idx.Search(vecFor(i), 1)
		if err != nil {
			t.Fatal(err)
		}
		want := fmt.Sprintf("id-%02d", i)
		if len(hits) == 0 || hits[0].ID != want {
			t.Errorf("Search(%d): got %v, want top-1=%s", i, hits, want)
		}
	}
}
// TestAdd_ThresholdBoundary_HotPathTransition exercises the boundary:
// Adds 1..threshold use rebuild, Add #threshold+1 transitions to
// incremental. Both regimes must produce a searchable index.
func TestAdd_ThresholdBoundary_HotPathTransition(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "boundary", Dimension: 4})
	vecFor := func(i int) []float32 {
		v := make([]float32, 4)
		v[i%4] = 1
		v[(i+1)%4] = 0.001 * float32(i+1)
		return v
	}
	for i := 0; i <= smallIndexRebuildThreshold+5; i++ {
		if err := idx.Add(fmt.Sprintf("k-%03d", i), vecFor(i), nil); err != nil {
			t.Fatalf("Add at len=%d: %v", idx.Len(), err)
		}
	}
	hits, err := idx.Search(vecFor(0), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) == 0 || hits[0].ID != "k-000" {
		t.Errorf("post-transition search lost recall: %v", hits)
	}
}
// TestAdd_PastThreshold_SustainedReAdd locks the multitier_100k
// 2026-05-01 production failure mode: an index that has grown past
// the rebuild threshold and is then subjected to repeated upsert
// (Delete + Add) cycles. The original recover()-only fix caught
// panics but returned errors at 96-98% rate; the i.vectors-backed
// architecture catches the panic AND recovers via rebuild so the
// caller sees success.
//
// NOTE(review): the seeding order and vector geometry below reproduce
// the production trigger — do not "simplify" them without re-verifying
// against the multitier_100k scenario.
func TestAdd_PastThreshold_SustainedReAdd(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "past_thresh", Dimension: 8, Distance: DistanceCosine})
	// Deterministic 8-dim vector per seed: a large component on axis
	// seed%8 plus a small tie-breaker on the next axis.
	mkVec := func(seed int) []float32 {
		v := make([]float32, 8)
		v[seed%8] = float32(seed + 1)
		v[(seed+1)%8] = 0.001 * float32(seed+1)
		return v
	}
	// Grow well past threshold (32) into the warm-path regime.
	const grown = 64
	for i := 0; i < grown; i++ {
		if err := idx.Add(fmt.Sprintf("g-%03d", i), mkVec(i), nil); err != nil {
			t.Fatalf("seed Add g-%03d: %v", i, err)
		}
	}
	if got := idx.Len(); got != grown {
		t.Fatalf("post-seed Len = %d, want %d", got, grown)
	}
	// Repeatedly upsert the same 8 IDs with new vectors — this is
	// the exact pattern that triggered v0.6.1's degenerate-state
	// nil-deref in production. With i.vectors as the panic-safe
	// source of truth, every Add must succeed.
	for round := 0; round < 100; round++ {
		for k := 0; k < 8; k++ {
			id := fmt.Sprintf("g-%03d", k) // re-add existing IDs
			vec := mkVec(round*1000 + k)
			if err := idx.Add(id, vec, nil); err != nil {
				t.Fatalf("upsert round=%d k=%d: %v", round, k, err)
			}
		}
	}
	// Index must still serve search after the upsert storm.
	// Recall correctness on near-collinear vectors is not the load-
	// bearing assertion; that the upsert loop completed without
	// errors IS the assertion. (Pre-fix this loop returned errors
	// at 96-98% rate per multitier_100k.)
	if got := idx.Len(); got != grown {
		t.Errorf("post-storm Len = %d, want %d (upsert should not change cardinality)", got, grown)
	}
	hits, err := idx.Search(mkVec(0), 5)
	if err != nil {
		t.Fatalf("post-storm Search errored: %v", err)
	}
	if len(hits) == 0 {
		t.Error("post-storm Search returned no hits")
	}
}
// TestAdd_RecoversFromPanickingGraph sanity-checks the safeGraphAdd /
// safeGraphDelete wrappers that back the i.vectors rebuild path.
//
// NOTE(review): despite the name and the original comment ("simulates
// the bug by directly poking the graph into a degenerate state"), the
// body below does NOT corrupt the graph — it only seeds past the
// rebuild threshold, verifies safeGraphAdd succeeds on a healthy
// graph, and undoes the side effect. A true panic-injection test would
// need a hook to force idx.g into the degenerate state; confirm
// whether that coverage exists elsewhere or rename this test.
func TestAdd_RecoversFromPanickingGraph(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "recover", Dimension: 4})
	// Deterministic 4-dim vector per seed.
	mkVec := func(seed int) []float32 {
		v := make([]float32, 4)
		v[seed%4] = float32(seed + 1)
		return v
	}
	// Seed past the small-index threshold so Adds run on the warm path.
	for i := 0; i < smallIndexRebuildThreshold+10; i++ {
		if err := idx.Add(fmt.Sprintf("r-%03d", i), mkVec(i), nil); err != nil {
			t.Fatalf("seed Add: %v", err)
		}
	}
	// safeGraphAdd should always succeed on a healthy graph.
	if !safeGraphAdd(idx.g, hnsw.MakeNode("safe-test", mkVec(999))) {
		t.Fatal("safeGraphAdd reported failure on healthy graph")
	}
	// Side-effect: that Add added "safe-test" to the graph but not
	// i.vectors. Restore consistency by removing it via the safe
	// path and proceeding.
	_ = safeGraphDelete(idx.g, "safe-test")
}
// TestAdd_SmallIndex_ConcurrentDistinctIDs models the playbook_record
// pattern: many requests in flight, each Adding a unique ID to a fresh
// small index. Vectord's mutex serializes these, but the concurrency
// stresses lock acquisition timing against the small-index transition
// state.
func TestAdd_SmallIndex_ConcurrentDistinctIDs(t *testing.T) {
	idx, _ := NewIndex(IndexParams{Name: "concurrent_small", Dimension: 8})
	const writers = 16
	const perWriter = 4 // 64 total > threshold, so we cross the boundary
	var wg sync.WaitGroup
	for w := 0; w < writers; w++ {
		wg.Add(1)
		go func(wi int) {
			defer wg.Done()
			for j := 0; j < perWriter; j++ {
				vec := make([]float32, 8)
				vec[(wi+j)%8] = float32(wi*100 + j + 1)
				vec[(wi+j+1)%8] = 0.01
				if err := idx.Add(fmt.Sprintf("w%d-%d", wi, j), vec, nil); err != nil {
					t.Errorf("Add w%d-%d at len=%d: %v", wi, j, idx.Len(), err)
					return
				}
			}
		}(w)
	}
	wg.Wait()
	if got, want := idx.Len(), writers*perWriter; got != want {
		t.Errorf("Len() = %d, want %d", got, want)
	}
}
// TestRegistry_Names_Sorted verifies Names() returns the registered
// index names in sorted order.
func TestRegistry_Names_Sorted(t *testing.T) {
	r := NewRegistry()
	for _, name := range []string{"zoo", "alpha", "midway"} {
		// Previously the Create error was discarded; a failed Create
		// would surface later as a confusing index-out-of-range panic.
		if _, err := r.Create(IndexParams{Name: name, Dimension: 4}); err != nil {
			t.Fatalf("Create %q: %v", name, err)
		}
	}
	got := r.Names()
	want := []string{"alpha", "midway", "zoo"}
	// Guard the length before indexing: got[i] panicked pre-fix when
	// Names() returned fewer entries than expected.
	if len(got) != len(want) {
		t.Fatalf("Names: got %v, want %v", got, want)
	}
	for i, w := range want {
		if got[i] != w {
			t.Errorf("Names[%d]: got %q, want %q", i, got[i], w)
		}
	}
}