root fa56134b90 ADR-003 wiring: Bearer token + IP allowlist middleware
Implements the auth posture from ADR-003 (commit 0d18ffa). Two
independent layers — Bearer token (constant-time compare via
crypto/subtle) and IP allowlist (CIDR set) — composed in shared.Run
so every binary inherits the same gate without per-binary wiring.

Together with the bind-gate from commit 6af0520, this mechanically
closes audit risks R-001 + R-007:
  - non-loopback bind without auth.token = startup refuse
  - non-loopback bind WITH auth.token + override env = allowed
  - loopback bind = all gates open (G0 dev unchanged)

internal/shared/auth.go (NEW)
  RequireAuth(cfg AuthConfig) returns chi-compatible middleware.
  Empty Token + empty AllowedIPs → pass-through (G0 dev mode).
  Token-only → 401 Bearer mismatch.
  AllowedIPs-only → 403 source IP not in CIDR set.
  Both → both gates apply.
  /health bypasses both layers (load-balancer / liveness probes
  shouldn't carry tokens).

  CIDR parsing pre-runs at boot; bare IP (no /N) treated as /32 (or
  /128 for IPv6). Invalid entries log warn and drop, fail-loud-but-
  not-fatal so a typo doesn't kill the binary.

  Token comparison: subtle.ConstantTimeCompare on the full
  "Bearer <token>" wire-format string. Length-mismatch returns 0
  (per stdlib spec), so wrong-length tokens reject without timing
  leak. Pre-encoded comparison slice stored in the middleware
  closure — one allocation per request.

  Source-IP extraction prefers net.SplitHostPort, falling back to
  RemoteAddr-as-is for httptest compatibility. X-Forwarded-For
  support is a follow-up when a trusted proxy fronts the gateway
  (config knob TBD per ADR-003 §"Future").

internal/shared/server.go
  Run signature: gained AuthConfig parameter (4th arg).
  /health stays mounted on the outer router (public).
  Registered routes go inside chi.Group with RequireAuth applied —
  empty config = transparent group.
  Added requireAuthOnNonLoopback startup check: non-loopback bind
  with empty Token = refuse to start (cites R-001 + R-007 by name).

internal/shared/config.go
  AuthConfig type added with TOML tags. Fields: Token, AllowedIPs.
  Composed into Config under [auth].

cmd/<svc>/main.go × 7 (catalogd, embedd, gateway, ingestd, queryd,
storaged, vectord; mcpd is unaffected — stdio doesn't bind a port)
  Each call site adds cfg.Auth as the 4th arg to shared.Run. No
  other changes — middleware applies via shared.Run uniformly.

internal/shared/auth_test.go (12 test funcs)
  Empty config pass-through, missing-token 401, wrong-token 401,
  correct-token 200, raw-token-without-Bearer-prefix 401, /health
  always public, IP allowlist allow + reject, bare IP /32, both
  layers when both configured, invalid CIDR drop-with-warn, RemoteAddr
  shape extraction. The constant-time comparison is verified by
  inspection (comments in auth.go) plus the existence of the
  passthrough test (length-mismatch case).

Verified:
  go test -count=1 ./internal/shared/  — all green (was 21, now 33 funcs)
  just verify                            — vet + test + 9 smokes 33s
  just proof contract                    — 53/0/1 unchanged

Smokes + proof harness keep working without any token configuration:
default Auth is empty struct → middleware is no-op → existing tests
pass unchanged. To exercise the gate, operators set [auth].token in
lakehouse.toml (or, per the "future" note in the ADR, via env var).

Closes audit findings:
  R-001 HIGH — fully mechanically closed (was: partial via bind gate)
  R-007 MED  — fully mechanically closed (was: design-only ADR-003)

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 07:11:34 -05:00

355 lines
11 KiB
Go

// vectord is the vector-search service. HNSW indexes keyed by
// string IDs with optional opaque JSON metadata. Wraps
// github.com/coder/hnsw (pure Go, no cgo).
//
// G1 + persistence: indexes are persisted to storaged at
// _vectors/<name>.{json,hnsw} and rehydrated on startup. Setting
// [vectord].storaged_url empty disables persistence (dev mode).
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"log/slog"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/go-chi/chi/v5"
"git.agentview.dev/profit/golangLAKEHOUSE/internal/shared"
"git.agentview.dev/profit/golangLAKEHOUSE/internal/storeclient"
"git.agentview.dev/profit/golangLAKEHOUSE/internal/vectord"
)
// Request limits and search defaults for the vectord HTTP API.
const (
	maxRequestBytes = 64 << 20 // 64 MiB cap for batched Add payloads
	defaultK        = 10       // fallback result count when searchRequest.K is absent or <= 0
	maxK            = 1000     // ceiling on requested result count; larger K is clamped, not rejected
)
// main wires config loading, the optional persistence layer (with
// startup rehydration), and the shared HTTP server for vectord.
func main() {
	configPath := flag.String("config", "lakehouse.toml", "path to TOML config")
	flag.Parse()

	cfg, err := shared.LoadConfig(*configPath)
	if err != nil {
		slog.Error("config", "err", err)
		os.Exit(1)
	}

	h := &handlers{reg: vectord.NewRegistry()}

	// An empty StoragedURL means dev/ephemeral mode: no persistor, no
	// rehydration, indexes live only in memory.
	if cfg.Vectord.StoragedURL != "" {
		h.persist = vectord.NewPersistor(storeclient.New(cfg.Vectord.StoragedURL))

		// Best-effort rehydration of persisted indexes. Failures are
		// logged rather than fatal: storaged might still be coming up
		// after vectord, and an index that failed to load is still
		// recoverable by re-ingesting on top of an empty registry.
		loadCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		count, loadErr := h.rehydrate(loadCtx)
		cancel()
		if loadErr != nil {
			slog.Warn("rehydrate", "err", loadErr, "loaded", count)
		} else {
			slog.Info("rehydrated", "indexes", count)
		}
	}

	if err := shared.Run("vectord", cfg.Vectord.Bind, h.register, cfg.Auth); err != nil {
		slog.Error("server", "err", err)
		os.Exit(1)
	}
}
// handlers owns the in-memory index registry and the optional
// persistence layer; every HTTP handler in this file hangs off it.
type handlers struct {
	reg     *vectord.Registry
	persist *vectord.Persistor // nil when persistence is disabled
}
// rehydrate enumerates persisted indexes and loads each into the
// registry. It returns how many indexes loaded successfully together
// with the listing error, if any — the caller decides fatality.
// Per-index load/register failures are logged and skipped.
func (h *handlers) rehydrate(ctx context.Context) (int, error) {
	if h.persist == nil {
		return 0, nil
	}
	names, err := h.persist.List(ctx)
	if err != nil {
		return 0, err
	}
	var count int
	for _, name := range names {
		idx, loadErr := h.persist.Load(ctx, name)
		if loadErr != nil {
			slog.Warn("rehydrate skip", "name", name, "err", loadErr)
			continue
		}
		// Registry.Create would rebuild a fresh, empty Index from
		// params; here we need the LOADED one (with vectors), so we go
		// through the helper that registers a pre-built Index instead.
		if regErr := h.reg.RegisterPrebuilt(idx); regErr != nil {
			slog.Warn("rehydrate register", "name", name, "err", regErr)
			continue
		}
		count++
	}
	return count, nil
}
// saveAfter is the post-write persistence hook. Failures are logged,
// not fatal: in-memory state is the source of truth in flight, the
// next mutation re-attempts the save, and the operator log records
// the storaged outage.
func (h *handlers) saveAfter(idx *vectord.Index) {
	if h.persist == nil {
		return
	}
	saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	err := h.persist.Save(saveCtx, idx)
	if err != nil {
		slog.Warn("persist save", "name", idx.Params().Name, "err", err)
	}
}
// deleteAfter mirrors saveAfter for the Delete path: best-effort
// removal of the persisted artifacts, logged-not-fatal on failure.
func (h *handlers) deleteAfter(name string) {
	if h.persist == nil {
		return
	}
	delCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	err := h.persist.Delete(delCtx, name)
	if err != nil {
		slog.Warn("persist delete", "name", name, "err", err)
	}
}
// register mounts the vectord route table on the shared router:
// index lifecycle (create / list / get / delete) plus the two
// data-plane endpoints (batched add, k-NN search).
func (h *handlers) register(r chi.Router) {
	r.Post("/vectors/index", h.handleCreate)
	r.Get("/vectors/index", h.handleList)
	r.Get("/vectors/index/{name}", h.handleGetIndex)
	r.Delete("/vectors/index/{name}", h.handleDelete)
	r.Post("/vectors/index/{name}/add", h.handleAdd)
	r.Post("/vectors/index/{name}/search", h.handleSearch)
}
// createRequest mirrors POST /vectors/index body. The optional fields
// are passed through to vectord.IndexParams unchanged; Create rejects
// bad combinations via ErrInvalidParams / ErrUnknownDistance.
type createRequest struct {
	Name      string `json:"name"`
	Dimension int    `json:"dimension"`
	M         int    `json:"m,omitempty"`
	EfSearch  int    `json:"ef_search,omitempty"`
	Distance  string `json:"distance,omitempty"`
}

// indexInfo is the GET /vectors/index/{name} response shape; it is
// also reused as the 201 body when an index is created.
type indexInfo struct {
	Params vectord.IndexParams `json:"params"`
	Length int                 `json:"length"` // current vector count (idx.Len())
}

// addRequest is the body for POST /vectors/index/{name}/add. Items
// are batched so callers can amortize HTTP overhead — the smoke
// inserts hundreds of vectors per request.
type addRequest struct {
	Items []addItem `json:"items"`
}

// addItem is a single vector in an add batch.
type addItem struct {
	ID       string          `json:"id"`
	Vector   []float32       `json:"vector"`
	Metadata json.RawMessage `json:"metadata,omitempty"` // opaque JSON, stored as-is
}

// searchRequest is the body for POST /vectors/index/{name}/search.
// K is defaulted to defaultK and clamped to maxK by handleSearch.
type searchRequest struct {
	Vector []float32 `json:"vector"`
	K      int       `json:"k,omitempty"`
}

// searchResponse wraps the k-NN hits for JSON encoding.
type searchResponse struct {
	Results []vectord.Result `json:"results"`
}
// handleCreate serves POST /vectors/index: decode the params, create
// the index, persist it, and reply 201 with the new index's info.
// 409 on duplicate name, 400 on invalid params or distance metric.
func (h *handlers) handleCreate(w http.ResponseWriter, r *http.Request) {
	var req createRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	params := vectord.IndexParams{
		Name:      req.Name,
		Dimension: req.Dimension,
		M:         req.M,
		EfSearch:  req.EfSearch,
		Distance:  req.Distance,
	}
	idx, err := h.reg.Create(params)
	switch {
	case errors.Is(err, vectord.ErrIndexAlreadyExists):
		http.Error(w, err.Error(), http.StatusConflict)
		return
	case errors.Is(err, vectord.ErrInvalidParams), errors.Is(err, vectord.ErrUnknownDistance):
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	case err != nil:
		slog.Error("create index", "name", req.Name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	h.saveAfter(idx)
	writeJSON(w, http.StatusCreated, indexInfo{Params: idx.Params(), Length: idx.Len()})
}
// handleList serves GET /vectors/index: all registered index names
// plus a convenience count.
func (h *handlers) handleList(w http.ResponseWriter, _ *http.Request) {
	names := h.reg.Names()
	resp := map[string]any{"names": names, "count": len(names)}
	writeJSON(w, http.StatusOK, resp)
}
// handleGetIndex serves GET /vectors/index/{name}: params + current
// length of one index. 404 when the name is unknown.
func (h *handlers) handleGetIndex(w http.ResponseWriter, r *http.Request) {
	idx, err := h.reg.Get(chi.URLParam(r, "name"))
	switch {
	case errors.Is(err, vectord.ErrIndexNotFound):
		http.Error(w, "not found", http.StatusNotFound)
	case err != nil:
		http.Error(w, "internal", http.StatusInternalServerError)
	default:
		writeJSON(w, http.StatusOK, indexInfo{Params: idx.Params(), Length: idx.Len()})
	}
}
// handleDelete serves DELETE /vectors/index/{name}: remove the index
// from the registry, then best-effort delete its persisted artifacts.
// 204 on success, 404 when the name is unknown.
func (h *handlers) handleDelete(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	err := h.reg.Delete(name)
	switch {
	case errors.Is(err, vectord.ErrIndexNotFound):
		http.Error(w, "not found", http.StatusNotFound)
		return
	case err != nil:
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	h.deleteAfter(name)
	w.WriteHeader(http.StatusNoContent)
}
// handleAdd serves POST /vectors/index/{name}/add: batched insert
// into an existing index. The whole batch is validated before any
// item is committed, and persistence saves once per batch.
func (h *handlers) handleAdd(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	idx, err := h.reg.Get(name)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	if err != nil {
		// Consistent with handleGetIndex: any other Get failure is a
		// 500. Without this guard a non-not-found error would leave
		// idx nil and panic below.
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	var req addRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	if len(req.Items) == 0 {
		http.Error(w, "items must be non-empty", http.StatusBadRequest)
		return
	}
	// Per scrum O-W4 (Opus, D5): pre-validate all items before any
	// Add, so a bad item at position N doesn't leave items 0..N-1
	// committed and item N rejected. Per scrum O-I3 (Opus, G1P):
	// extend pre-validation to cover NaN/Inf and zero-norm — these
	// were caught inside idx.Add but only after partial commits.
	params := idx.Params()
	dim := params.Dimension
	for j, it := range req.Items {
		if it.ID == "" {
			http.Error(w, "items["+strconv.Itoa(j)+"]: empty id", http.StatusBadRequest)
			return
		}
		if len(it.Vector) != dim {
			http.Error(w, "items["+strconv.Itoa(j)+"]: dim mismatch (index="+strconv.Itoa(dim)+", got="+strconv.Itoa(len(it.Vector))+")", http.StatusBadRequest)
			return
		}
		if err := vectord.ValidateVector(it.Vector, params.Distance); err != nil {
			http.Error(w, "items["+strconv.Itoa(j)+"]: "+err.Error(), http.StatusBadRequest)
			return
		}
	}
	for j, it := range req.Items {
		if err := idx.Add(it.ID, it.Vector, it.Metadata); err != nil {
			// Defensive: dim / NaN-Inf / zero-norm problems should
			// already have been caught by the pre-validation loop
			// above; if one still surfaces from idx.Add, report it
			// as a 400 rather than a 500.
			if errors.Is(err, vectord.ErrDimensionMismatch) ||
				strings.Contains(err.Error(), "non-finite") ||
				strings.Contains(err.Error(), "zero-norm") {
				http.Error(w, "items["+strconv.Itoa(j)+"]: "+err.Error(), http.StatusBadRequest)
				return
			}
			slog.Error("add", "name", name, "id", it.ID, "err", err)
			http.Error(w, "internal", http.StatusInternalServerError)
			return
		}
	}
	// One save per batch (post-loop), not per item. Per scrum
	// O-W4-style discipline: HTTP-batch boundary is the natural unit.
	h.saveAfter(idx)
	writeJSON(w, http.StatusOK, map[string]any{"added": len(req.Items), "length": idx.Len()})
}
// handleSearch serves POST /vectors/index/{name}/search: k-NN query
// against an existing index. K is defaulted and clamped server-side.
func (h *handlers) handleSearch(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "name")
	idx, err := h.reg.Get(name)
	if errors.Is(err, vectord.ErrIndexNotFound) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	if err != nil {
		// Consistent with handleGetIndex: any other Get failure is a
		// 500. Without this guard a non-not-found error would leave
		// idx nil and panic below.
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	var req searchRequest
	if !decodeJSON(w, r, &req) {
		return
	}
	// Missing or non-positive K falls back to defaultK; oversized K
	// is clamped to maxK rather than rejected.
	k := req.K
	if k <= 0 {
		k = defaultK
	}
	if k > maxK {
		k = maxK
	}
	hits, err := idx.Search(req.Vector, k)
	if errors.Is(err, vectord.ErrDimensionMismatch) {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if err != nil {
		slog.Error("search", "name", name, "err", err)
		http.Error(w, "internal", http.StatusInternalServerError)
		return
	}
	writeJSON(w, http.StatusOK, searchResponse{Results: hits})
}
// decodeJSON decodes a JSON request body into v, enforcing the
// maxRequestBytes size cap. On failure it writes the error response
// (413 for an oversized body, 400 otherwise) and returns false.
func decodeJSON(w http.ResponseWriter, r *http.Request, v any) bool {
	defer r.Body.Close()
	r.Body = http.MaxBytesReader(w, r.Body, maxRequestBytes)
	err := json.NewDecoder(r.Body).Decode(v)
	if err == nil {
		return true
	}
	// The string match is a belt-and-braces fallback for paths where
	// the typed *http.MaxBytesError doesn't survive wrapping.
	var tooBig *http.MaxBytesError
	if errors.As(err, &tooBig) || strings.Contains(err.Error(), "http: request body too large") {
		http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
		return false
	}
	http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
	return false
}
func writeJSON(w http.ResponseWriter, code int, v any) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if err := json.NewEncoder(w).Encode(v); err != nil {
slog.Warn("write json", "err", err)
}
}