root 8b92518d21 G1P: vectord persistence to storaged + scrum (3 fixes incl. 3-way convergent)
Adds optional persistence to vectord (G1's HNSW vector search). A
single-file framed format per index eliminates the torn-write failure
class that the 3-way convergent scrum finding identified:

  _vectors/<name>.lhv1  — single binary blob:
      [4 bytes magic "LHV1"]
      [4 bytes envelope_len uint32 BE]
      [envelope bytes — JSON params + metadata + version]
      [graph bytes — raw hnsw.Graph.Export]
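
A minimal sketch of what encoding and decoding that frame could look
like (the package, function names, and error messages are illustrative,
not the shipped vectord code):

  // Illustrative helpers for the .lhv1 layout above; not the shipped API.
  package lhv1sketch

  import (
      "bytes"
      "encoding/binary"
      "errors"
  )

  var magic = []byte("LHV1")

  // encodeFrame lays out magic, big-endian envelope length, envelope JSON,
  // then the raw graph bytes, so one PUT carries the whole index.
  func encodeFrame(envelope, graph []byte) []byte {
      var buf bytes.Buffer
      buf.Write(magic)
      var n [4]byte
      binary.BigEndian.PutUint32(n[:], uint32(len(envelope)))
      buf.Write(n[:])
      buf.Write(envelope)
      buf.Write(graph)
      return buf.Bytes()
  }

  // decodeFrame validates the magic and length prefix, then splits the
  // blob back into envelope and graph bytes.
  func decodeFrame(blob []byte) (envelope, graph []byte, err error) {
      if len(blob) < 8 || !bytes.Equal(blob[:4], magic) {
          return nil, nil, errors.New("lhv1: bad magic or truncated header")
      }
      n := binary.BigEndian.Uint32(blob[4:8])
      if uint64(n) > uint64(len(blob)-8) {
          return nil, nil, errors.New("lhv1: envelope length exceeds blob")
      }
      return blob[8 : 8+n], blob[8+n:], nil
  }

Because both halves live in one blob, a partial write can only produce
a file that fails the magic/length check, never one that pairs a fresh
envelope with a stale graph.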

Pre-extraction: internal/catalogd/store_client.go → internal/storeclient/
shared package, since both catalogd and vectord need it. Same pattern as
the pre-D5 catalogclient extraction.
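
For orientation, the shared surface is roughly a small key/value client
against storaged. The sketch below is an assumption inferred from how
catalogd and vectord use it; only storeclient.ErrKeyNotFound is
confirmed by the code in this commit:

  // Hypothetical sketch of the extracted storeclient surface. Only
  // ErrKeyNotFound is confirmed by this commit's code; the method set is
  // inferred from the catalogd/vectord call sites described above.
  package storeclient

  import (
      "context"
      "errors"
  )

  // ErrKeyNotFound lets callers classify a missing key with errors.Is
  // instead of matching message text.
  var ErrKeyNotFound = errors.New("storeclient: key not found")

  // Store is the minimal client both catalogd and vectord would share.
  type Store interface {
      Put(ctx context.Context, key string, body []byte) error
      Get(ctx context.Context, key string) ([]byte, error)
      List(ctx context.Context, prefix string) ([]string, error)
      Delete(ctx context.Context, key string) error
  }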

Optional via [vectord].storaged_url config (empty = ephemeral mode).
On startup: List + Load each persisted index. After Create / batch Add /
DELETE: Save (or Delete from storaged). Save failures are logged, not
fatal: in-memory state remains the source of truth in flight.
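
A rough sketch of that save-after-mutation shape, assuming hypothetical
names (server, persistor, saveAfter); the point is that save errors are
logged rather than returned, not the exact wiring:

  // Hypothetical shape of vectord's best-effort persistence hook; the
  // type and method names are illustrative, not the shipped API.
  package vectordsketch

  import (
      "context"
      "log"
  )

  type index struct{ name string }

  // persistor is whatever writes one index as a single framed blob.
  type persistor interface {
      Save(ctx context.Context, idx *index) error
  }

  type server struct {
      persist persistor // nil when [vectord].storaged_url is empty (ephemeral mode)
      log     *log.Logger
  }

  // saveAfter runs after Create, a batch Add, or DELETE. Persistence is
  // best-effort: failures are logged, never returned, because the
  // in-memory graph already holds the mutation and stays authoritative.
  func (s *server) saveAfter(ctx context.Context, idx *index) {
      if s.persist == nil {
          return
      }
      if err := s.persist.Save(ctx, idx); err != nil {
          s.log.Printf("persist index %q: %v (in-memory state unaffected)", idx.name, err)
      }
  }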

Acceptance smoke G1P 8/8 PASS — kill+restart preserves state, post-
restart search returns dist=0 (graph round-trips exactly), DELETE
removes the file, post-delete restart shows count=0.

All 8 smokes (D1-D6 + G1 + G1P) PASS deterministically. The g1_smoke
gained a scripts/g1_smoke.toml that disables persistence, so the
in-memory API test stays decoupled from any rehydrate-from-storaged
state contamination.

Cross-lineage scrum on shipped code:
  - Opus 4.7 (opencode):                     1 BLOCK + 5 WARN + 3 INFO
  - Kimi K2-0905 (openrouter):               1 BLOCK + 2 WARN
  - Qwen3-coder (openrouter):                2 BLOCK + 2 WARN + 1 INFO

Fixed (3 — 1 convergent + 2 single-reviewer):
  C1 (Opus + Kimi + Qwen 3-WAY CONVERGENT WARN): Save was non-atomic
    across two PUTs — envelope-succeeds + graph-fails left a half-
    saved index that passed the "both present" List filter and
    silently mismatched metadata against vectors on Load.
    Fix: collapse to single framed file (no torn-write window
    possible).
  O-B1 (Opus BLOCK): isNotFound substring-matched "key not found"
    against the wrapped error message — brittle, any 5xx body
    containing that text would silently misclassify as missing.
    Fix: errors.Is(err, storeclient.ErrKeyNotFound); see the sketch
    after this list.
  O-I3 (Opus INFO): handleAdd pre-validation only covered id+dim;
    NaN/Inf/zero-norm could still fail mid-batch leaving partial
    commits. Fix: extend pre-validation to call ValidateVector
    (newly exported) per item before any commit.
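
A minimal illustration of the O-B1 pattern as fixed above: classify a
missing key by sentinel with errors.Is rather than by substring (the
isNotFound helpers here are sketches, not the shipped functions):

  // Sketch of the O-B1 fix: classify "missing" by sentinel, not by text.
  package sketch

  import (
      "errors"
      "strings"

      "git.agentview.dev/profit/golangLAKEHOUSE/internal/storeclient"
  )

  // Before (brittle): any error whose text happens to contain the phrase,
  // e.g. a 5xx body echoed into the wrapped message, counts as "not found".
  func isNotFoundBefore(err error) bool {
      return err != nil && strings.Contains(err.Error(), "key not found")
  }

  // After: only the wrapped storeclient sentinel matches, however deeply
  // the error has been wrapped.
  func isNotFound(err error) bool {
      return errors.Is(err, storeclient.ErrKeyNotFound)
  }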

Dismissed (3 false positives):
  K-B1 + Q-B1 ("safeKey double-escapes %2F segments") — false
    convergent. Wire-protocol escape is decoded by storaged's chi
    router on the way in; on-disk key is the original literal.
    %2F round-trips correctly through PathEscape → URL → chi decode
    → S3 key (round-trip sketched after this list).
  Q-B2 ("List vulnerable to race conditions") — vectord is single-
    process; no concurrent Save against List in the same vectord.
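
A quick self-check of the escape half of that K-B1/Q-B1 dismissal (the
chi-decode step on storaged's side is taken from the reasoning above,
not demonstrated here):

  // A key segment containing a literal "%2F" survives PathEscape and the
  // matching unescape unchanged, so it cannot collapse into a "/".
  package main

  import (
      "fmt"
      "net/url"
  )

  func main() {
      key := "my%2Findex.lhv1"       // literal %2F in the stored key
      escaped := url.PathEscape(key) // "%" itself is escaped to "%25"
      decoded, _ := url.PathUnescape(escaped)
      fmt.Println(escaped)      // my%252Findex.lhv1
      fmt.Println(decoded == key) // true
  }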

Deferred (3): rehydrate per-index timeout (G2+ multi-index scale),
saveAfter request ctx (matches G0 timeout deferral), Encode RLock
during slow writer (documented as buffer-only API).

The C1 finding is the strongest signal of the cross-lineage filter:
three independent reviewers all flagged the same torn-write hazard.
Single-file framing eliminates the class — there's now no Persistor
state where envelope and graph can disagree.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 01:33:23 -05:00

163 lines · 4.5 KiB · Go

package catalogd

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"git.agentview.dev/profit/golangLAKEHOUSE/internal/storeclient"
)

// memStore is an in-memory Store fake for unit tests.
type memStore struct {
	mu   sync.Mutex
	data map[string][]byte
}

func newMemStore() *memStore { return &memStore{data: map[string][]byte{}} }

func (m *memStore) Put(_ context.Context, key string, body []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	cp := make([]byte, len(body))
	copy(cp, body)
	m.data[key] = cp
	return nil
}

func (m *memStore) Get(_ context.Context, key string) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	b, ok := m.data[key]
	if !ok {
		return nil, storeclient.ErrKeyNotFound
	}
	return b, nil
}

func (m *memStore) List(_ context.Context, prefix string) ([]string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	out := []string{}
	for k := range m.data {
		if len(k) >= len(prefix) && k[:len(prefix)] == prefix {
			out = append(out, k)
		}
	}
	return out, nil
}

func mkRegistry(t *testing.T) (*Registry, *memStore) {
	t.Helper()
	s := newMemStore()
	r := NewRegistry(s)
	r.now = func() time.Time { return time.Unix(1777435000, 0).UTC() }
	return r, s
}

func TestRegister_NewManifest(t *testing.T) {
	r, _ := mkRegistry(t)
	rc := int64(100)
	m, existing, err := r.Register(context.Background(), "workers", "sha256:abc",
		[]Object{{Key: "datasets/workers/p1.parquet", Size: 1024}}, &rc)
	if err != nil {
		t.Fatalf("Register: %v", err)
	}
	if existing {
		t.Error("expected existing=false for new manifest")
	}
	if m.DatasetID != DatasetIDForName("workers") {
		t.Errorf("DatasetID: got %q, want UUIDv5(workers)", m.DatasetID)
	}
}

func TestRegister_SameFingerprint_Idempotent(t *testing.T) {
	r, _ := mkRegistry(t)
	rc := int64(100)
	first, _, _ := r.Register(context.Background(), "workers", "sha256:abc",
		[]Object{{Key: "p1.parquet", Size: 1024}}, &rc)
	// Re-register same name + fingerprint with new objects.
	rc2 := int64(200)
	second, existing, err := r.Register(context.Background(), "workers", "sha256:abc",
		[]Object{{Key: "p1.parquet", Size: 1024}, {Key: "p2.parquet", Size: 2048}}, &rc2)
	if err != nil {
		t.Fatalf("Register (idempotent): %v", err)
	}
	if !existing {
		t.Error("expected existing=true on idempotent re-register")
	}
	if second.DatasetID != first.DatasetID {
		t.Errorf("DatasetID changed: %q → %q", first.DatasetID, second.DatasetID)
	}
	if len(second.Objects) != 2 {
		t.Errorf("Objects not replaced: got %d, want 2", len(second.Objects))
	}
	if second.RowCount == nil || *second.RowCount != 200 {
		t.Errorf("RowCount not bumped: got %v, want 200", second.RowCount)
	}
}

func TestRegister_DifferentFingerprint_Conflict(t *testing.T) {
	r, _ := mkRegistry(t)
	_, _, _ = r.Register(context.Background(), "workers", "sha256:abc",
		[]Object{{Key: "p1.parquet", Size: 1024}}, nil)
	_, _, err := r.Register(context.Background(), "workers", "sha256:DIFFERENT",
		[]Object{{Key: "p1.parquet", Size: 1024}}, nil)
	if !errors.Is(err, ErrFingerprintConflict) {
		t.Fatalf("expected ErrFingerprintConflict, got %v", err)
	}
}

func TestRehydrate_RecoversManifests(t *testing.T) {
	// Build first registry, register 2 manifests.
	r1, store := mkRegistry(t)
	_, _, _ = r1.Register(context.Background(), "workers", "sha256:a", nil, nil)
	_, _, _ = r1.Register(context.Background(), "candidates", "sha256:b", nil, nil)
	// Build a second registry against the same store + rehydrate.
	r2 := NewRegistry(store)
	n, err := r2.Rehydrate(context.Background())
	if err != nil {
		t.Fatalf("Rehydrate: %v", err)
	}
	if n != 2 {
		t.Errorf("recovered %d, want 2", n)
	}
	if _, err := r2.Get("workers"); err != nil {
		t.Errorf("Get(workers): %v", err)
	}
	if _, err := r2.Get("candidates"); err != nil {
		t.Errorf("Get(candidates): %v", err)
	}
}

func TestList_Sorted(t *testing.T) {
	r, _ := mkRegistry(t)
	_, _, _ = r.Register(context.Background(), "zoo", "fp", nil, nil)
	_, _, _ = r.Register(context.Background(), "alpha", "fp", nil, nil)
	_, _, _ = r.Register(context.Background(), "midway", "fp", nil, nil)
	got := r.List()
	want := []string{"alpha", "midway", "zoo"}
	for i, m := range got {
		if m.Name != want[i] {
			t.Errorf("List[%d]: got %q, want %q", i, m.Name, want[i])
		}
	}
}

func TestRegister_RejectsEmptyInputs(t *testing.T) {
	r, _ := mkRegistry(t)
	_, _, err := r.Register(context.Background(), "", "fp", nil, nil)
	if err == nil {
		t.Error("expected error on empty name")
	}
	_, _, err = r.Register(context.Background(), "x", "", nil, nil)
	if err == nil {
		t.Error("expected error on empty fingerprint")
	}
}